/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void invalidate_bh_lrus(void);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}

static int sync_buffer(void *word)
{
        struct block_device *bd;
        struct buffer_head *bh
                = container_of(word, struct buffer_head, b_state);

        smp_mb();
        bd = bh->b_bdev;
        if (bd)
                blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
        clear_buffer_locked(bh);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
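
/*
 * Usage sketch (illustrative only, not part of this file): the canonical
 * synchronous-read pattern built on the locking primitives above.  The
 * caller submits the I/O and then sleeps in wait_on_buffer() until the
 * completion handler unlocks the buffer.  `bh' is a hypothetical buffer
 * obtained from, e.g., __getblk():
 *
 *	ll_rw_block(READ, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		goto read_error;
 */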

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        page->private = 0;
        page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                        bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        int ret = 0;

        if (bdev) {
                int err;

                ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
                err = filemap_fdatawait(bdev->bd_inode->i_mapping);
                if (!ret)
                        ret = err;
        }
        return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_super(struct super_block *sb)
{
        sync_inodes_sb(sb, 0);
        DQUOT_SYNC(sb);
        lock_super(sb);
        if (sb->s_dirt && sb->s_op->write_super)
                sb->s_op->write_super(sb);
        unlock_super(sb);
        if (sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, 1);
        sync_blockdev(sb->s_bdev);
        sync_inodes_sb(sb, 1);

        return sync_blockdev(sb->s_bdev);
}

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = fsync_super(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;

        down(&bdev->bd_mount_sem);
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
                wmb();

                sync_inodes_sb(sb, 0);
                DQUOT_SYNC(sb);

                lock_super(sb);
                if (sb->s_dirt && sb->s_op->write_super)
                        sb->s_op->write_super(sb);
                unlock_super(sb);

                if (sb->s_op->sync_fs)
                        sb->s_op->sync_fs(sb, 1);

                sync_blockdev(sb->s_bdev);
                sync_inodes_sb(sb, 1);

                sb->s_frozen = SB_FREEZE_TRANS;
                wmb();

                sync_blockdev(sb->s_bdev);

                if (sb->s_op->write_super_lockfs)
                        sb->s_op->write_super_lockfs(sb);
        }

        sync_blockdev(bdev);
        return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  --  unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        if (sb) {
                BUG_ON(sb->s_bdev != bdev);

                if (sb->s_op->unlockfs)
                        sb->s_op->unlockfs(sb);
                sb->s_frozen = SB_UNFROZEN;
                wmb();
                wake_up(&sb->s_wait_unfrozen);
                drop_super(sb);
        }

        up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
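
/*
 * Usage sketch (illustrative only): the freeze/thaw protocol as a snapshot
 * or backup driver might use it.  Writes are blocked and the filesystem is
 * on-disk consistent between the two calls; take_snapshot() is a
 * hypothetical caller-side helper:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 */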

/*
 * sync everything.  Start out by waking pdflush, because that writes back
 * all queues in parallel.
 */
static void do_sync(unsigned long wait)
{
        wakeup_bdflush(0);
        sync_inodes(0);		/* All mappings, inodes and their blockdevs */
        DQUOT_SYNC(NULL);
        sync_supers();		/* Write the superblocks */
        sync_filesystems(0);	/* Start syncing the filesystems */
        sync_filesystems(wait);	/* Waitingly sync the filesystems */
        sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
        if (!wait)
                printk("Emergency Sync complete\n");
        if (unlikely(laptop_mode))
                laptop_sync_completion();
}

asmlinkage long sys_sync(void)
{
        do_sync(1);
        return 0;
}

void emergency_sync(void)
{
        pdflush_operation(do_sync, 0);
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */

int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        struct inode * inode = dentry->d_inode;
        struct super_block * sb;
        int ret, err;

        /* sync the inode to buffers */
        ret = write_inode_now(inode, 0);

        /* sync the superblock to buffers */
        sb = inode->i_sb;
        lock_super(sb);
        if (sb->s_op->write_super)
                sb->s_op->write_super(sb);
        unlock_super(sb);

        /* .. finally sync the buffers to disk */
        err = sync_blockdev(sb->s_bdev);
        if (!ret)
                ret = err;
        return ret;
}
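
/*
 * Usage sketch (illustrative only): a simple filesystem can point its
 * file_operations ->fsync at file_fsync() and get correct flush-everything
 * behaviour for free.  The struct name is hypothetical:
 *
 *	static struct file_operations simplefs_file_ops = {
 *		...
 *		.fsync = file_fsync,
 *	};
 */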

asmlinkage long sys_fsync(unsigned int fd)
{
        struct file * file;
        struct address_space *mapping;
        int ret, err;

        ret = -EBADF;
        file = fget(fd);
        if (!file)
                goto out;

        mapping = file->f_mapping;

        ret = -EINVAL;
        if (!file->f_op || !file->f_op->fsync) {
                /* Why?  We can still call filemap_fdatawrite */
                goto out_putf;
        }

        current->flags |= PF_SYNCWRITE;
        ret = filemap_fdatawrite(mapping);

        /*
         * We need to protect against concurrent writers,
         * which could cause livelocks in fsync_buffers_list
         */
        down(&mapping->host->i_sem);
        err = file->f_op->fsync(file, file->f_dentry, 0);
        if (!ret)
                ret = err;
        up(&mapping->host->i_sem);
        err = filemap_fdatawait(mapping);
        if (!ret)
                ret = err;
        current->flags &= ~PF_SYNCWRITE;

out_putf:
        fput(file);
out:
        return ret;
}

asmlinkage long sys_fdatasync(unsigned int fd)
{
        struct file * file;
        struct address_space *mapping;
        int ret, err;

        ret = -EBADF;
        file = fget(fd);
        if (!file)
                goto out;

        ret = -EINVAL;
        if (!file->f_op || !file->f_op->fsync)
                goto out_putf;

        mapping = file->f_mapping;

        current->flags |= PF_SYNCWRITE;
        ret = filemap_fdatawrite(mapping);
        down(&mapping->host->i_sem);
        err = file->f_op->fsync(file, file->f_dentry, 1);
        if (!ret)
                ret = err;
        up(&mapping->host->i_sem);
        err = filemap_fdatawait(mapping);
        if (!ret)
                ret = err;
        current->flags &= ~PF_SYNCWRITE;

out_putf:
        fput(file);
out:
        return ret;
}

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block, (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty
   buffers we could generate corruption also on the next media inserted.
   Thus a parameter is necessary to handle this case in the safest way
   possible (trying not to corrupt the newly inserted disk with data
   belonging to the old, now corrupted, disk).  Also for the ramdisk the
   natural thing to do in order to release the ramdisk memory is to
   destroy dirty buffers.

   These are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   was introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
{
        invalidate_bh_lrus();
        /*
         * FIXME: what about destroy_dirty_buffers?
         * We really want to use invalidate_inode_pages2() for
         * that, but not until that's cleaned up.
         */
        invalidate_inode_pages(bdev->bd_inode->i_mapping);
}
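
/*
 * Usage sketch (illustrative only): the "normal usage" described above --
 * a driver that wants the blockdev pagecache dropped without losing dirty
 * data first pushes it out, then invalidates (0 == do not destroy dirty
 * buffers):
 *
 *	sync_blockdev(bdev);
 *	invalidate_bdev(bdev, 0);
 */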

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone **zones;
        pg_data_t *pgdat;

        wakeup_bdflush(1024);
        yield();

        for_each_pgdat(pgdat) {
                zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
                if (*zones)
                        try_to_free_pages(zones, GFP_NOFS, 0);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        static DEFINE_SPINLOCK(page_uptodate_lock);
        unsigned long flags;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (printk_ratelimit())
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        spin_lock_irqsave(&page_uptodate_lock, flags);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        spin_unlock_irqrestore(&page_uptodate_lock, flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        spin_unlock_irqrestore(&page_uptodate_lock, flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        static DEFINE_SPINLOCK(page_uptodate_lock);
        unsigned long flags;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                        bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        spin_lock_irqsave(&page_uptodate_lock, flags);
        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        spin_unlock_irqrestore(&page_uptodate_lock, flags);
        end_page_writeback(page);
        return;

still_busy:
        spin_unlock_irqrestore(&page_uptodate_lock, flags);
        return;
}

/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async read I/O against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_write;
        set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address spaces'
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written
 * for a successful fsync(); its assoc_mapping is the blockdev which "owns"
 * the buffers.
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
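
/*
 * Usage sketch (illustrative only): the typical ->fsync of a buffer-based
 * filesystem, wired up as described in the big comment above.  The names
 * are hypothetical; ext2's fsync is the real-world model:
 *
 *	static int simplefs_fsync(struct file *file, struct dentry *dentry,
 *				  int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */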

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                if (mapping->assoc_mapping != buffer_mapping)
                        BUG();
        }
        if (list_empty(&bh->b_assoc_buffers)) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
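
/*
 * Usage sketch (illustrative only): a filesystem dirtying a metadata block
 * on the backing device that must reach disk before a later fsync of
 * `inode' may return -- for example an ext2-style indirect block.
 * `indirect_blocknr' is a hypothetical caller-side value:
 *
 *	struct buffer_head *bh = sb_getblk(inode->i_sb, indirect_blocknr);
 *
 *	...modify bh->b_data...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */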

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page
 * dirty bit, see a bunch of clean buffers and we'd end up with dirty
 * buffers/clean page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to
 * the address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        struct address_space * const mapping = page->mapping;

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        spin_unlock(&mapping->private_lock);

        if (!TestSetPageDirty(page)) {
                write_lock_irq(&mapping->tree_lock);
                if (page->mapping) {	/* Race with truncate? */
                        if (mapping_cap_account_dirty(mapping))
                                inc_page_state(nr_dirty);
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                }
                write_unlock_irq(&mapping->tree_lock);
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        }

        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        int err = 0, err2;

        INIT_LIST_HEAD(&tmp);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                list_del_init(&bh->b_assoc_buffers);
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * ll_rw_block() actually writes the current
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
                                wait_on_buffer(bh);
                                ll_rw_block(WRITE, 1, &bh);
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                __remove_assoc_queue(bh);
                get_bh(bh);
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                bh->b_end_io = NULL;
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page)
                return NULL;

        if (!PageLocked(page))
                BUG();

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        init_page_buffers(page, bdev, block, size);
                        return page;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        init_page_buffers(page, bdev, block, size);
        spin_unlock(&inode->i_mapping->private_lock);
        return page;

failed:
        BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 *
 * Except that's a bug.  Attaching dirty buffers to a dirty
 * blockdev's page can result in filesystem corruption, because
 * some of those buffers may be aliases of filesystem data.
 * grow_dev_page() will go BUG() if this happens.
 */
static inline int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
        struct page *page;
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;
        block = index << sizebits;

        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
                return 0;
        unlock_page(page);
        page_cache_release(page);
        return 1;
}
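
/*
 * Worked example of the arithmetic above (illustrative only): with
 * PAGE_SIZE == 4096 and size == 1024 the loop yields sizebits == 2,
 * i.e. four buffers per page.  A request for block 13 then gives
 * index = 13 >> 2 = 3 (the fourth page of the blockdev mapping) and
 * block = 3 << 2 = 12, the first of the four blocks (12..15) to which
 * that page's buffers will be mapped.
 */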

struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "hardsect size: %d\n",
                                        bdev_hardsect_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head * bh;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                if (!grow_buffers(bdev, block, size))
                        free_more_memory();
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
        if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
                __set_page_dirty_nobuffers(bh->b_page);
}

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
        WARN_ON(1);
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (!list_empty(&bh->b_assoc_buffers)) {
                struct address_space *buffer_mapping = bh->b_page->mapping;

                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = NULL;
        struct bh_lru *lru;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        if (lru->bhs[0] != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;

                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
                        struct buffer_head *bh2 = lru->bhs[in];

                        if (bh2 == bh) {
                                __brelse(bh2);
                        } else {
                                if (out >= BH_LRU_SIZE) {
                                        BUG_ON(evictee != NULL);
                                        evictee = bh2;
                                } else {
                                        bhs[out++] = bh2;
                                }
                        }
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
                memcpy(lru->bhs, bhs, sizeof(bhs));
        }
        bh_lru_unlock();

        if (evictee)
                __brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static inline struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
{
        struct buffer_head *ret = NULL;
        struct bh_lru *lru;
        int i;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = lru->bhs[i];

                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        lru->bhs[i] = lru->bhs[i - 1];
                                        i--;
                                }
                                lru->bhs[0] = bh;
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, int size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                bh = __find_get_block_slow(bdev, block, size);
                if (bh)
                        bh_lru_install(bh);
        }
        if (bh)
                touch_buffer(bh);
        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size.  The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, int size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size);
        return bh;
}
EXPORT_SYMBOL(__getblk);
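
/*
 * Usage sketch (illustrative only): overwriting a block in place via
 * __getblk().  Because the block is rewritten in its entirety, no prior
 * read is needed; `data' and `blocknr' are hypothetical caller-side values:
 *
 *	struct buffer_head *bh = __getblk(bdev, blocknr, blocksize);
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data, data, blocksize);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */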

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, int size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);
        ll_rw_block(READA, 1, &bh);
        brelse(bh);
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns buffer head that contains it.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, int size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);

        if (!buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread);
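
/*
 * Usage sketch (illustrative only): synchronously reading a metadata block,
 * with read-ahead of its likely successor.  `blocknr' is hypothetical:
 *
 *	__breadahead(bdev, blocknr + 1, blocksize);
 *	bh = __bread(bdev, blocknr, blocksize);
 *	if (!bh)
 *		return -EIO;
 *	...read bh->b_data...
 *	brelse(bh);
 */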

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
        struct bh_lru *b = &get_cpu_var(bh_lrus);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
        put_cpu_var(bh_lrus);
}

static void invalidate_bh_lrus(void)
{
        on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}

void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
{
        bh->b_page = page;
        if (offset >= PAGE_SIZE)
                BUG();
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
                 */
                bh->b_data = (char *)(0 + offset);
        else
                bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static inline void discard_buffer(struct buffer_head * bh)
{
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        unlock_buffer(bh);
}

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).  If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
 *
 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
 */
int try_to_release_page(struct page *page, int gfp_mask)
{
        struct address_space * const mapping = page->mapping;

        BUG_ON(!PageLocked(page));
        if (PageWriteback(page))
                return 0;

        if (mapping && mapping->a_ops->releasepage)
                return mapping->a_ops->releasepage(page, gfp_mask);
        return try_to_free_buffers(page);
}
EXPORT_SYMBOL(try_to_release_page);

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
int block_invalidatepage(struct page *page, unsigned long offset)
{
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        int ret = 1;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                goto out;

        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                /*
                 * is this block fully invalidated?
                 */
                if (offset <= curr_off)
                        discard_buffer(bh);
                curr_off = next_off;
                bh = next;
        } while (bh != head);

        /*
         * We release buffers only if the entire page is being invalidated.
         * The get_block cached value has been unconditionally invalidated,
         * so real IO is not possible anymore.
         */
        if (offset == 0)
                ret = try_to_release_page(page, 0);
out:
        return ret;
}
EXPORT_SYMBOL(block_invalidatepage);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
                        unsigned long blocksize, unsigned long b_state)
{
        struct buffer_head *bh, *head, *tail;

        head = alloc_page_buffers(page, blocksize, 1);
        bh = head;
        do {
                bh->b_state |= b_state;
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;

        spin_lock(&page->mapping->private_lock);
        if (PageUptodate(page) || PageDirty(page)) {
                bh = head;
                do {
                        if (PageDirty(page))
                                set_buffer_dirty(bh);
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        attach_page_buffers(page, head);
        spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
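
/*
 * Usage sketch (illustrative only): the standard attach-on-demand pattern
 * used by the helpers later in this file -- give a locked page its buffers
 * the first time they are needed:
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 *	head = page_buffers(page);
 */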

/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway.  We used to use
 * unmap_buffer() for such invalidation, but that was wrong.  We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
        struct buffer_head *old_bh;

        might_sleep();

        old_bh = __find_get_block_slow(bdev, block, 0);
        if (old_bh) {
                clear_buffer_dirty(old_bh);
                wait_on_buffer(old_bh);
                clear_buffer_req(old_bh);
                __brelse(old_bh);
        }
}
EXPORT_SYMBOL(unmap_underlying_metadata);

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
                        get_block_t *get_block, struct writeback_control *wbc)
{
        int err;
        sector_t block;
        sector_t last_block;
        struct buffer_head *bh, *head;
        int nr_underway = 0;

        BUG_ON(!PageLocked(page));

        last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, 1 << inode->i_blkbits,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
        }

        /*
         * Be very careful.  We have no exclusion from __set_page_dirty_buffers
         * here, and the (potentially unmapped) buffers may become dirty at
         * any time.  If a buffer becomes dirty here after we've inspected it
         * then we just miss that fact, and the page stays dirty.
         *
         * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
         * handle that here by just cleaning them.
         */

        block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        head = page_buffers(page);
        bh = head;

        /*
         * Get all the dirty buffers mapped to disk addresses and
         * handle any aliases from the underlying blockdev's mapping.
         */
        do {
                if (block > last_block) {
                        /*
                         * mapped buffers outside i_size will occur, because
                         * this page can be outside i_size when there is a
                         * truncate in progress.
                         */
                        /*
                         * The buffer was zeroed by block_write_full_page()
                         */
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                goto recover;
                        if (buffer_new(bh)) {
                                /* blockdev mappings never come here */
                                clear_buffer_new(bh);
                                unmap_underlying_metadata(bh->b_bdev,
                                                        bh->b_blocknr);
                        }
                }
                bh = bh->b_this_page;
                block++;
        } while (bh != head);

        do {
                get_bh(bh);
                if (!buffer_mapped(bh))
                        continue;
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page.  Note that this can
                 * potentially cause a busy-wait loop from pdflush and kswapd
                 * activity, but those code paths have their own higher-level
                 * throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
                        lock_buffer(bh);
                } else if (test_set_buffer_locked(bh)) {
                        redirty_page_for_writepage(wbc, page);
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
                        mark_buffer_async_write(bh);
                } else {
                        unlock_buffer(bh);
                }
        } while ((bh = bh->b_this_page) != head);

        /*
         * The page and its buffers are protected by PageWriteback(), so we can
         * drop the bh refcounts early.
         */
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        unlock_page(page);

        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(WRITE, bh);
                        nr_underway++;
                }
                put_bh(bh);
                bh = next;
        } while (bh != head);

        err = 0;
done:
        if (nr_underway == 0) {
                /*
                 * The page was marked dirty, but the buffers were
                 * clean.  Someone wrote them back by hand with
                 * ll_rw_block/submit_bh.  A rare case.
                 */
                int uptodate = 1;
                do {
                        if (!buffer_uptodate(bh)) {
                                uptodate = 0;
                                break;
                        }
                        bh = bh->b_this_page;
                } while (bh != head);
                if (uptodate)
                        SetPageUptodate(page);
                end_page_writeback(page);
                /*
                 * The page and buffer_heads can be released at any time from
                 * here on.
                 */
                wbc->pages_skipped++;	/* We didn't write this page */
        }
        return err;

recover:
        /*
         * ENOSPC, or some other error.  We may already have added some
         * blocks to the file, so we need to write these out to avoid
         * exposing stale data.
         * The page is currently locked and not marked for writeback
         */
        bh = head;
        /* Recovery: lock and submit the mapped buffers */
        do {
                get_bh(bh);
                if (buffer_mapped(bh) && buffer_dirty(bh)) {
                        lock_buffer(bh);
                        mark_buffer_async_write(bh);
                } else {
                        /*
                         * The buffer may have been set dirty during
                         * attachment to a dirty page.
                         */
                        clear_buffer_dirty(bh);
                }
        } while ((bh = bh->b_this_page) != head);
        SetPageError(page);
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        unlock_page(page);
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
                        submit_bh(WRITE, bh);
                        nr_underway++;
                }
                put_bh(bh);
                bh = next;
        } while (bh != head);
        goto done;
}
1917
1918static int __block_prepare_write(struct inode *inode, struct page *page,
1919 unsigned from, unsigned to, get_block_t *get_block)
1920{
1921 unsigned block_start, block_end;
1922 sector_t block;
1923 int err = 0;
1924 unsigned blocksize, bbits;
1925	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
1926
1927 BUG_ON(!PageLocked(page));
1928 BUG_ON(from > PAGE_CACHE_SIZE);
1929 BUG_ON(to > PAGE_CACHE_SIZE);
1930 BUG_ON(from > to);
1931
1932 blocksize = 1 << inode->i_blkbits;
1933 if (!page_has_buffers(page))
1934 create_empty_buffers(page, blocksize, 0);
1935 head = page_buffers(page);
1936
1937 bbits = inode->i_blkbits;
1938 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1939
1940	for (bh = head, block_start = 0; bh != head || !block_start;
1941	     block++, block_start = block_end, bh = bh->b_this_page) {
1942 block_end = block_start + blocksize;
1943 if (block_end <= from || block_start >= to) {
1944 if (PageUptodate(page)) {
1945 if (!buffer_uptodate(bh))
1946 set_buffer_uptodate(bh);
1947 }
1948 continue;
1949 }
1950 if (buffer_new(bh))
1951 clear_buffer_new(bh);
1952 if (!buffer_mapped(bh)) {
1953 err = get_block(inode, block, bh, 1);
1954 if (err)
1955 goto out;
1956 if (buffer_new(bh)) {
1957 clear_buffer_new(bh);
1958 unmap_underlying_metadata(bh->b_bdev,
1959 bh->b_blocknr);
1960 if (PageUptodate(page)) {
1961 set_buffer_uptodate(bh);
1962 continue;
1963 }
1964 if (block_end > to || block_start < from) {
1965 void *kaddr;
1966
1967 kaddr = kmap_atomic(page, KM_USER0);
1968 if (block_end > to)
1969 memset(kaddr+to, 0,
1970 block_end-to);
1971 if (block_start < from)
1972 memset(kaddr+block_start,
1973 0, from-block_start);
1974 flush_dcache_page(page);
1975 kunmap_atomic(kaddr, KM_USER0);
1976 }
1977 continue;
1978 }
1979 }
1980 if (PageUptodate(page)) {
1981 if (!buffer_uptodate(bh))
1982 set_buffer_uptodate(bh);
1983 continue;
1984 }
1985 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1986 (block_start < from || block_end > to)) {
1987 ll_rw_block(READ, 1, &bh);
1988			*wait_bh++ = bh;
1989 }
1990 }
1991 /*
1992 * If we issued read requests - let them complete.
1993 */
1994	while (wait_bh > wait) {
1995 wait_on_buffer(*--wait_bh);
1996 if (!buffer_uptodate(*wait_bh))
1997 return -EIO;
1998 }
1999 return 0;
2000out:
2001 /*
2002 * Zero out any newly allocated blocks to avoid exposing stale
2003 * data. If BH_New is set, we know that the block was newly
2004 * allocated in the above loop.
2005 */
2006 bh = head;
2007 block_start = 0;
2008 do {
2009 block_end = block_start+blocksize;
2010 if (block_end <= from)
2011 goto next_bh;
2012 if (block_start >= to)
2013 break;
2014 if (buffer_new(bh)) {
2015 void *kaddr;
2016
2017 clear_buffer_new(bh);
2018 kaddr = kmap_atomic(page, KM_USER0);
2019 memset(kaddr+block_start, 0, bh->b_size);
2020 kunmap_atomic(kaddr, KM_USER0);
2021 set_buffer_uptodate(bh);
2022 mark_buffer_dirty(bh);
2023 }
2024next_bh:
2025 block_start = block_end;
2026 bh = bh->b_this_page;
2027 } while (bh != head);
2028 return err;
2029}
2030
2031static int __block_commit_write(struct inode *inode, struct page *page,
2032 unsigned from, unsigned to)
2033{
2034 unsigned block_start, block_end;
2035 int partial = 0;
2036 unsigned blocksize;
2037 struct buffer_head *bh, *head;
2038
2039 blocksize = 1 << inode->i_blkbits;
2040
2041	for (bh = head = page_buffers(page), block_start = 0;
2042	     bh != head || !block_start;
2043	     block_start = block_end, bh = bh->b_this_page) {
2044 block_end = block_start + blocksize;
2045 if (block_end <= from || block_start >= to) {
2046 if (!buffer_uptodate(bh))
2047 partial = 1;
2048 } else {
2049 set_buffer_uptodate(bh);
2050 mark_buffer_dirty(bh);
2051 }
2052 }
2053
2054 /*
2055 * If this is a partial write which happened to make all buffers
2056 * uptodate then we can optimize away a bogus readpage() for
2057 * the next read(). Here we 'discover' whether the page went
2058 * uptodate as a result of this (potentially partial) write.
2059 */
2060 if (!partial)
2061 SetPageUptodate(page);
2062 return 0;
2063}
2064
2065/*
2066 * Generic "read page" function for block devices that have the normal
2067 * get_block functionality. This is most of the block device filesystems.
2068 * Reads the page asynchronously --- the unlock_buffer() and
2069 * set/clear_buffer_uptodate() functions propagate buffer state into the
2070 * page struct once IO has completed.
2071 */
2072int block_read_full_page(struct page *page, get_block_t *get_block)
2073{
2074 struct inode *inode = page->mapping->host;
2075 sector_t iblock, lblock;
2076 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2077 unsigned int blocksize;
2078 int nr, i;
2079 int fully_mapped = 1;
2080
2081	BUG_ON(!PageLocked(page));
2083 blocksize = 1 << inode->i_blkbits;
2084 if (!page_has_buffers(page))
2085 create_empty_buffers(page, blocksize, 0);
2086 head = page_buffers(page);
2087
2088 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2089 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2090 bh = head;
2091 nr = 0;
2092 i = 0;
2093
2094 do {
2095 if (buffer_uptodate(bh))
2096 continue;
2097
2098 if (!buffer_mapped(bh)) {
2099 fully_mapped = 0;
2100 if (iblock < lblock) {
2101 if (get_block(inode, iblock, bh, 0))
2102 SetPageError(page);
2103 }
2104 if (!buffer_mapped(bh)) {
2105 void *kaddr = kmap_atomic(page, KM_USER0);
2106 memset(kaddr + i * blocksize, 0, blocksize);
2107 flush_dcache_page(page);
2108 kunmap_atomic(kaddr, KM_USER0);
2109 set_buffer_uptodate(bh);
2110 continue;
2111 }
2112 /*
2113 * get_block() might have updated the buffer
2114 * synchronously
2115 */
2116 if (buffer_uptodate(bh))
2117 continue;
2118 }
2119 arr[nr++] = bh;
2120 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2121
2122 if (fully_mapped)
2123 SetPageMappedToDisk(page);
2124
2125 if (!nr) {
2126 /*
2127 * All buffers are uptodate - we can set the page uptodate
2128 * as well. But not if get_block() returned an error.
2129 */
2130 if (!PageError(page))
2131 SetPageUptodate(page);
2132 unlock_page(page);
2133 return 0;
2134 }
2135
2136 /* Stage two: lock the buffers */
2137 for (i = 0; i < nr; i++) {
2138 bh = arr[i];
2139 lock_buffer(bh);
2140 mark_buffer_async_read(bh);
2141 }
2142
2143 /*
2144	 * Stage three: start the IO. Check for uptodateness
2145 * inside the buffer lock in case another process reading
2146 * the underlying blockdev brought it uptodate (the sct fix).
2147 */
2148 for (i = 0; i < nr; i++) {
2149 bh = arr[i];
2150 if (buffer_uptodate(bh))
2151 end_buffer_async_read(bh, 1);
2152 else
2153 submit_bh(READ, bh);
2154 }
2155 return 0;
2156}
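
/*
 * Editor's sketch (not part of the original file): the get_block_t
 * contract that block_read_full_page() and friends rely on.  "examplefs"
 * and its lookup/alloc helpers are hypothetical; a real implementation
 * would consult on-disk metadata.
 */
#if 0
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	sector_t phys;

	phys = examplefs_lookup_block(inode, iblock);	/* hypothetical */
	if (phys) {
		/* Existing block: point the bh at the device block */
		map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}
	if (!create)
		return 0;	/* read of a hole: leave the bh unmapped */
	phys = examplefs_alloc_block(inode, iblock);	/* hypothetical */
	if (!phys)
		return -ENOSPC;
	map_bh(bh_result, inode->i_sb, phys);
	set_buffer_new(bh_result);	/* callers must zero/unmap aliases */
	return 0;
}
#endif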
2157
2158/* Utility function for filesystems that need to do work on expanding
2159 * truncates. Uses prepare/commit_write to allow the filesystem to
2160 * deal with the hole.
2161 */
2162int generic_cont_expand(struct inode *inode, loff_t size)
2163{
2164 struct address_space *mapping = inode->i_mapping;
2165 struct page *page;
2166 unsigned long index, offset, limit;
2167 int err;
2168
2169 err = -EFBIG;
2170 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2171 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2172 send_sig(SIGXFSZ, current, 0);
2173 goto out;
2174 }
2175 if (size > inode->i_sb->s_maxbytes)
2176 goto out;
2177
2178 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
2179
2180	/* ugh. in prepare/commit_write, if from==to==start of block, we
2181	 * skip the prepare. Make sure we never send an offset for the start
2182	 * of a block.
2183	 */
2184 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2185 offset++;
2186 }
2187 index = size >> PAGE_CACHE_SHIFT;
2188 err = -ENOMEM;
2189 page = grab_cache_page(mapping, index);
2190 if (!page)
2191 goto out;
2192 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2193 if (!err) {
2194 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2195 }
2196 unlock_page(page);
2197 page_cache_release(page);
2198 if (err > 0)
2199 err = 0;
2200out:
2201 return err;
2202}
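
/*
 * Editor's sketch: the intended caller is a filesystem that cannot
 * represent holes, invoking generic_cont_expand() from its ->setattr()
 * on an expanding truncate.  "examplefs" is hypothetical.
 */
#if 0
static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size > i_size_read(inode)) {
		/* zero-fill out to the new size via prepare/commit_write */
		err = generic_cont_expand(inode, attr->ia_size);
		if (err)
			return err;
	}
	return inode_setattr(inode, attr);
}
#endif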
2203
2204/*
2205 * For moronic filesystems that do not allow holes in files.
2206 * We may have to extend the file.
2207 */
2208
2209int cont_prepare_write(struct page *page, unsigned offset,
2210 unsigned to, get_block_t *get_block, loff_t *bytes)
2211{
2212 struct address_space *mapping = page->mapping;
2213 struct inode *inode = mapping->host;
2214 struct page *new_page;
2215 pgoff_t pgpos;
2216 long status;
2217 unsigned zerofrom;
2218 unsigned blocksize = 1 << inode->i_blkbits;
2219 void *kaddr;
2220
2221	while (page->index > (pgpos = *bytes >> PAGE_CACHE_SHIFT)) {
2222 status = -ENOMEM;
2223 new_page = grab_cache_page(mapping, pgpos);
2224 if (!new_page)
2225 goto out;
2226 /* we might sleep */
2227		if (*bytes >> PAGE_CACHE_SHIFT != pgpos) {
2228 unlock_page(new_page);
2229 page_cache_release(new_page);
2230 continue;
2231 }
2232 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2233 if (zerofrom & (blocksize-1)) {
2234 *bytes |= (blocksize-1);
2235 (*bytes)++;
2236 }
2237 status = __block_prepare_write(inode, new_page, zerofrom,
2238 PAGE_CACHE_SIZE, get_block);
2239 if (status)
2240 goto out_unmap;
2241 kaddr = kmap_atomic(new_page, KM_USER0);
2242 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2243 flush_dcache_page(new_page);
2244 kunmap_atomic(kaddr, KM_USER0);
2245 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2246 unlock_page(new_page);
2247 page_cache_release(new_page);
2248 }
2249
2250 if (page->index < pgpos) {
2251 /* completely inside the area */
2252 zerofrom = offset;
2253 } else {
2254 /* page covers the boundary, find the boundary offset */
2255 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2256
2257 /* if we will expand the thing last block will be filled */
2258 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2259 *bytes |= (blocksize-1);
2260 (*bytes)++;
2261 }
2262
2263 /* starting below the boundary? Nothing to zero out */
2264 if (offset <= zerofrom)
2265 zerofrom = offset;
2266 }
2267 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2268 if (status)
2269 goto out1;
2270 if (zerofrom < offset) {
2271 kaddr = kmap_atomic(page, KM_USER0);
2272 memset(kaddr+zerofrom, 0, offset-zerofrom);
2273 flush_dcache_page(page);
2274 kunmap_atomic(kaddr, KM_USER0);
2275 __block_commit_write(inode, page, zerofrom, offset);
2276 }
2277 return 0;
2278out1:
2279 ClearPageUptodate(page);
2280 return status;
2281
2282out_unmap:
2283 ClearPageUptodate(new_page);
2284 unlock_page(new_page);
2285 page_cache_release(new_page);
2286out:
2287 return status;
2288}
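
/*
 * Editor's sketch: a hole-less filesystem would wire this up as its
 * ->prepare_write(), keeping the "allocated so far" high-water mark in
 * per-inode state.  The examplefs_i() accessor and its mmu_private
 * field are hypothetical; FAT uses this exact pattern.
 */
#if 0
static int examplefs_cont_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, examplefs_get_block,
				  &examplefs_i(inode)->mmu_private);
}
#endif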
2289
2290int block_prepare_write(struct page *page, unsigned from, unsigned to,
2291 get_block_t *get_block)
2292{
2293 struct inode *inode = page->mapping->host;
2294 int err = __block_prepare_write(inode, page, from, to, get_block);
2295 if (err)
2296 ClearPageUptodate(page);
2297 return err;
2298}
2299
2300int block_commit_write(struct page *page, unsigned from, unsigned to)
2301{
2302 struct inode *inode = page->mapping->host;
2303	__block_commit_write(inode, page, from, to);
2304 return 0;
2305}
2306
2307int generic_commit_write(struct file *file, struct page *page,
2308 unsigned from, unsigned to)
2309{
2310 struct inode *inode = page->mapping->host;
2311 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2312	__block_commit_write(inode, page, from, to);
2313 /*
2314 * No need to use i_size_read() here, the i_size
2315 * cannot change under us because we hold i_sem.
2316 */
2317 if (pos > inode->i_size) {
2318 i_size_write(inode, pos);
2319 mark_inode_dirty(inode);
2320 }
2321 return 0;
2322}
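
/*
 * Editor's sketch: how a buffer_head-based filesystem typically strings
 * these helpers together in its address_space_operations.  The
 * examplefs_* wrappers are hypothetical; the helpers are the real ones
 * from this file.
 */
#if 0
static int examplefs_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return block_write_full_page(page, examplefs_get_block, wbc);
}

static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}

static int examplefs_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, examplefs_get_block);
}

static struct address_space_operations examplefs_aops = {
	.readpage	= examplefs_readpage,
	.writepage	= examplefs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= examplefs_prepare_write,
	.commit_write	= generic_commit_write,
};
#endif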
2323
2324
2325/*
2326 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2327 * immediately, while under the page lock. So it needs a special end_io
2328 * handler which does not touch the bh after unlocking it.
2329 *
2330 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2331 * a race there is benign: unlock_buffer() only uses the bh's address for
2332 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2333 * itself.
2334 */
2335static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2336{
2337 if (uptodate) {
2338 set_buffer_uptodate(bh);
2339 } else {
2340 /* This happens, due to failed READA attempts. */
2341 clear_buffer_uptodate(bh);
2342 }
2343 unlock_buffer(bh);
2344}
2345
2346/*
2347 * On entry, the page is fully not uptodate.
2348 * On exit the page is fully uptodate in the areas outside (from,to)
2349 */
2350int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2351 get_block_t *get_block)
2352{
2353 struct inode *inode = page->mapping->host;
2354 const unsigned blkbits = inode->i_blkbits;
2355 const unsigned blocksize = 1 << blkbits;
2356 struct buffer_head map_bh;
2357 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2358 unsigned block_in_page;
2359 unsigned block_start;
2360 sector_t block_in_file;
2361 char *kaddr;
2362 int nr_reads = 0;
2363 int i;
2364 int ret = 0;
2365 int is_mapped_to_disk = 1;
2366 int dirtied_it = 0;
2367
2368 if (PageMappedToDisk(page))
2369 return 0;
2370
2371 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2372 map_bh.b_page = page;
2373
2374 /*
2375 * We loop across all blocks in the page, whether or not they are
2376 * part of the affected region. This is so we can discover if the
2377 * page is fully mapped-to-disk.
2378 */
2379 for (block_start = 0, block_in_page = 0;
2380 block_start < PAGE_CACHE_SIZE;
2381 block_in_page++, block_start += blocksize) {
2382 unsigned block_end = block_start + blocksize;
2383 int create;
2384
2385 map_bh.b_state = 0;
2386 create = 1;
2387 if (block_start >= to)
2388 create = 0;
2389 ret = get_block(inode, block_in_file + block_in_page,
2390 &map_bh, create);
2391 if (ret)
2392 goto failed;
2393 if (!buffer_mapped(&map_bh))
2394 is_mapped_to_disk = 0;
2395 if (buffer_new(&map_bh))
2396 unmap_underlying_metadata(map_bh.b_bdev,
2397 map_bh.b_blocknr);
2398 if (PageUptodate(page))
2399 continue;
2400 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2401 kaddr = kmap_atomic(page, KM_USER0);
2402 if (block_start < from) {
2403 memset(kaddr+block_start, 0, from-block_start);
2404 dirtied_it = 1;
2405 }
2406 if (block_end > to) {
2407 memset(kaddr + to, 0, block_end - to);
2408 dirtied_it = 1;
2409 }
2410 flush_dcache_page(page);
2411 kunmap_atomic(kaddr, KM_USER0);
2412 continue;
2413 }
2414 if (buffer_uptodate(&map_bh))
2415 continue; /* reiserfs does this */
2416 if (block_start < from || block_end > to) {
2417 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2418
2419 if (!bh) {
2420 ret = -ENOMEM;
2421 goto failed;
2422 }
2423 bh->b_state = map_bh.b_state;
2424 atomic_set(&bh->b_count, 0);
2425 bh->b_this_page = NULL;
2426 bh->b_page = page;
2427 bh->b_blocknr = map_bh.b_blocknr;
2428 bh->b_size = blocksize;
2429 bh->b_data = (char *)(long)block_start;
2430 bh->b_bdev = map_bh.b_bdev;
2431 bh->b_private = NULL;
2432 read_bh[nr_reads++] = bh;
2433 }
2434 }
2435
2436 if (nr_reads) {
2437 struct buffer_head *bh;
2438
2439 /*
2440 * The page is locked, so these buffers are protected from
2441		 * any VM or truncate activity. Hence we don't need to worry
2442		 * about the buffer_head refcounts.
2443 */
2444 for (i = 0; i < nr_reads; i++) {
2445 bh = read_bh[i];
2446 lock_buffer(bh);
2447 bh->b_end_io = end_buffer_read_nobh;
2448 submit_bh(READ, bh);
2449 }
2450 for (i = 0; i < nr_reads; i++) {
2451 bh = read_bh[i];
2452 wait_on_buffer(bh);
2453 if (!buffer_uptodate(bh))
2454 ret = -EIO;
2455 free_buffer_head(bh);
2456 read_bh[i] = NULL;
2457 }
2458 if (ret)
2459 goto failed;
2460 }
2461
2462 if (is_mapped_to_disk)
2463 SetPageMappedToDisk(page);
2464 SetPageUptodate(page);
2465
2466 /*
2467 * Setting the page dirty here isn't necessary for the prepare_write
2468 * function - commit_write will do that. But if/when this function is
2469 * used within the pagefault handler to ensure that all mmapped pages
2470 * have backing space in the filesystem, we will need to dirty the page
2471 * if its contents were altered.
2472 */
2473 if (dirtied_it)
2474 set_page_dirty(page);
2475
2476 return 0;
2477
2478failed:
2479 for (i = 0; i < nr_reads; i++) {
2480 if (read_bh[i])
2481 free_buffer_head(read_bh[i]);
2482 }
2483
2484 /*
2485 * Error recovery is pretty slack. Clear the page and mark it dirty
2486 * so we'll later zero out any blocks which _were_ allocated.
2487 */
2488 kaddr = kmap_atomic(page, KM_USER0);
2489 memset(kaddr, 0, PAGE_CACHE_SIZE);
2490 kunmap_atomic(kaddr, KM_USER0);
2491 SetPageUptodate(page);
2492 set_page_dirty(page);
2493 return ret;
2494}
2495EXPORT_SYMBOL(nobh_prepare_write);
2496
2497int nobh_commit_write(struct file *file, struct page *page,
2498 unsigned from, unsigned to)
2499{
2500 struct inode *inode = page->mapping->host;
2501 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2502
2503 set_page_dirty(page);
2504 if (pos > inode->i_size) {
2505 i_size_write(inode, pos);
2506 mark_inode_dirty(inode);
2507 }
2508 return 0;
2509}
2510EXPORT_SYMBOL(nobh_commit_write);
2511
2512/*
2513 * nobh_writepage() - based on block_write_full_page() except
2514 * that it tries to operate without attaching bufferheads to
2515 * the page.
2516 */
2517int nobh_writepage(struct page *page, get_block_t *get_block,
2518 struct writeback_control *wbc)
2519{
2520 struct inode * const inode = page->mapping->host;
2521 loff_t i_size = i_size_read(inode);
2522 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2523 unsigned offset;
2524 void *kaddr;
2525 int ret;
2526
2527 /* Is the page fully inside i_size? */
2528 if (page->index < end_index)
2529 goto out;
2530
2531 /* Is the page fully outside i_size? (truncate in progress) */
2532 offset = i_size & (PAGE_CACHE_SIZE-1);
2533 if (page->index >= end_index+1 || !offset) {
2534 /*
2535 * The page may have dirty, unmapped buffers. For example,
2536 * they may have been added in ext3_writepage(). Make them
2537 * freeable here, so the page does not leak.
2538 */
2539#if 0
2540		/* Not really sure about this - do we need this? */
2541 if (page->mapping->a_ops->invalidatepage)
2542 page->mapping->a_ops->invalidatepage(page, offset);
2543#endif
2544 unlock_page(page);
2545 return 0; /* don't care */
2546 }
2547
2548 /*
2549 * The page straddles i_size. It must be zeroed out on each and every
2550 * writepage invocation because it may be mmapped. "A file is mapped
2551 * in multiples of the page size. For a file that is not a multiple of
2552 * the page size, the remaining memory is zeroed when mapped, and
2553 * writes to that region are not written out to the file."
2554 */
2555 kaddr = kmap_atomic(page, KM_USER0);
2556 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2557 flush_dcache_page(page);
2558 kunmap_atomic(kaddr, KM_USER0);
2559out:
2560 ret = mpage_writepage(page, get_block, wbc);
2561 if (ret == -EAGAIN)
2562 ret = __block_write_full_page(inode, page, get_block, wbc);
2563 return ret;
2564}
2565EXPORT_SYMBOL(nobh_writepage);
2566
2567/*
2568 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2569 */
2570int nobh_truncate_page(struct address_space *mapping, loff_t from)
2571{
2572 struct inode *inode = mapping->host;
2573 unsigned blocksize = 1 << inode->i_blkbits;
2574 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2575 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2576 unsigned to;
2577 struct page *page;
2578 struct address_space_operations *a_ops = mapping->a_ops;
2579 char *kaddr;
2580 int ret = 0;
2581
2582 if ((offset & (blocksize - 1)) == 0)
2583 goto out;
2584
2585 ret = -ENOMEM;
2586 page = grab_cache_page(mapping, index);
2587 if (!page)
2588 goto out;
2589
2590 to = (offset + blocksize) & ~(blocksize - 1);
2591 ret = a_ops->prepare_write(NULL, page, offset, to);
2592 if (ret == 0) {
2593 kaddr = kmap_atomic(page, KM_USER0);
2594 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2595 flush_dcache_page(page);
2596 kunmap_atomic(kaddr, KM_USER0);
2597 set_page_dirty(page);
2598 }
2599 unlock_page(page);
2600 page_cache_release(page);
2601out:
2602 return ret;
2603}
2604EXPORT_SYMBOL(nobh_truncate_page);
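
/*
 * Editor's sketch: an aops table built on the nobh family, so ordinary
 * writes never attach buffer_heads to the page.  Reads still go through
 * block_read_full_page() as in the earlier sketch; examplefs_* names
 * remain hypothetical.
 */
#if 0
static int examplefs_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, examplefs_get_block, wbc);
}

static int examplefs_nobh_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, examplefs_get_block);
}

static struct address_space_operations examplefs_nobh_aops = {
	.readpage	= examplefs_readpage,
	.writepage	= examplefs_nobh_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= examplefs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};
#endif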
2605
2606int block_truncate_page(struct address_space *mapping,
2607 loff_t from, get_block_t *get_block)
2608{
2609 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2610 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2611 unsigned blocksize;
2612	sector_t iblock;
2613 unsigned length, pos;
2614 struct inode *inode = mapping->host;
2615 struct page *page;
2616 struct buffer_head *bh;
2617 void *kaddr;
2618 int err;
2619
2620 blocksize = 1 << inode->i_blkbits;
2621 length = offset & (blocksize - 1);
2622
2623 /* Block boundary? Nothing to do */
2624 if (!length)
2625 return 0;
2626
2627 length = blocksize - length;
2628	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2629
2630 page = grab_cache_page(mapping, index);
2631 err = -ENOMEM;
2632 if (!page)
2633 goto out;
2634
2635 if (!page_has_buffers(page))
2636 create_empty_buffers(page, blocksize, 0);
2637
2638 /* Find the buffer that contains "offset" */
2639 bh = page_buffers(page);
2640 pos = blocksize;
2641 while (offset >= pos) {
2642 bh = bh->b_this_page;
2643 iblock++;
2644 pos += blocksize;
2645 }
2646
2647 err = 0;
2648 if (!buffer_mapped(bh)) {
2649 err = get_block(inode, iblock, bh, 0);
2650 if (err)
2651 goto unlock;
2652 /* unmapped? It's a hole - nothing to do */
2653 if (!buffer_mapped(bh))
2654 goto unlock;
2655 }
2656
2657 /* Ok, it's mapped. Make sure it's up-to-date */
2658 if (PageUptodate(page))
2659 set_buffer_uptodate(bh);
2660
2661 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2662 err = -EIO;
2663 ll_rw_block(READ, 1, &bh);
2664 wait_on_buffer(bh);
2665 /* Uhhuh. Read error. Complain and punt. */
2666 if (!buffer_uptodate(bh))
2667 goto unlock;
2668 }
2669
2670 kaddr = kmap_atomic(page, KM_USER0);
2671 memset(kaddr + offset, 0, length);
2672 flush_dcache_page(page);
2673 kunmap_atomic(kaddr, KM_USER0);
2674
2675 mark_buffer_dirty(bh);
2676 err = 0;
2677
2678unlock:
2679 unlock_page(page);
2680 page_cache_release(page);
2681out:
2682 return err;
2683}
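
/*
 * Editor's sketch of the usual caller: the filesystem's truncate path
 * zeroes the tail of the last partial block before freeing data blocks,
 * so a later expansion cannot expose stale data.  examplefs_free_blocks()
 * is hypothetical.
 */
#if 0
static void examplefs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size,
				examplefs_get_block);
	examplefs_free_blocks(inode, inode->i_size);	/* hypothetical */
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
}
#endif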
2684
2685/*
2686 * The generic ->writepage function for buffer-backed address_spaces
2687 */
2688int block_write_full_page(struct page *page, get_block_t *get_block,
2689 struct writeback_control *wbc)
2690{
2691 struct inode * const inode = page->mapping->host;
2692 loff_t i_size = i_size_read(inode);
2693 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2694 unsigned offset;
2695 void *kaddr;
2696
2697 /* Is the page fully inside i_size? */
2698 if (page->index < end_index)
2699 return __block_write_full_page(inode, page, get_block, wbc);
2700
2701 /* Is the page fully outside i_size? (truncate in progress) */
2702 offset = i_size & (PAGE_CACHE_SIZE-1);
2703 if (page->index >= end_index+1 || !offset) {
2704 /*
2705 * The page may have dirty, unmapped buffers. For example,
2706 * they may have been added in ext3_writepage(). Make them
2707 * freeable here, so the page does not leak.
2708 */
2709 block_invalidatepage(page, 0);
2710 unlock_page(page);
2711 return 0; /* don't care */
2712 }
2713
2714 /*
2715 * The page straddles i_size. It must be zeroed out on each and every
2716	 * writepage invocation because it may be mmapped. "A file is mapped
2717 * in multiples of the page size. For a file that is not a multiple of
2718 * the page size, the remaining memory is zeroed when mapped, and
2719 * writes to that region are not written out to the file."
2720 */
2721 kaddr = kmap_atomic(page, KM_USER0);
2722 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2723 flush_dcache_page(page);
2724 kunmap_atomic(kaddr, KM_USER0);
2725 return __block_write_full_page(inode, page, get_block, wbc);
2726}
2727
2728sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2729 get_block_t *get_block)
2730{
2731 struct buffer_head tmp;
2732 struct inode *inode = mapping->host;
2733 tmp.b_state = 0;
2734 tmp.b_blocknr = 0;
2735 get_block(inode, block, &tmp, 0);
2736 return tmp.b_blocknr;
2737}
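
/*
 * Editor's sketch: for get_block-based filesystems the ->bmap method is
 * a one-liner; userspace (e.g. the FIBMAP ioctl used by lilo) uses it to
 * learn physical block numbers.
 */
#if 0
static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, examplefs_get_block);
}
#endif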
2738
2739static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2740{
2741 struct buffer_head *bh = bio->bi_private;
2742
2743 if (bio->bi_size)
2744 return 1;
2745
2746 if (err == -EOPNOTSUPP) {
2747 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2748 set_bit(BH_Eopnotsupp, &bh->b_state);
2749 }
2750
2751 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2752 bio_put(bio);
2753 return 0;
2754}
2755
2756int submit_bh(int rw, struct buffer_head * bh)
2757{
2758 struct bio *bio;
2759 int ret = 0;
2760
2761 BUG_ON(!buffer_locked(bh));
2762 BUG_ON(!buffer_mapped(bh));
2763 BUG_ON(!bh->b_end_io);
2764
2765 if (buffer_ordered(bh) && (rw == WRITE))
2766 rw = WRITE_BARRIER;
2767
2768 /*
2769	 * Only clear out a write error when rewriting; should this
2770 * include WRITE_SYNC as well?
2771 */
2772 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2773 clear_buffer_write_io_error(bh);
2774
2775 /*
2776 * from here on down, it's all bio -- do the initial mapping,
2777 * submit_bio -> generic_make_request may further map this bio around
2778 */
2779 bio = bio_alloc(GFP_NOIO, 1);
2780
2781 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2782 bio->bi_bdev = bh->b_bdev;
2783 bio->bi_io_vec[0].bv_page = bh->b_page;
2784 bio->bi_io_vec[0].bv_len = bh->b_size;
2785 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2786
2787 bio->bi_vcnt = 1;
2788 bio->bi_idx = 0;
2789 bio->bi_size = bh->b_size;
2790
2791 bio->bi_end_io = end_bio_bh_io_sync;
2792 bio->bi_private = bh;
2793
2794 bio_get(bio);
2795 submit_bio(rw, bio);
2796
2797 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2798 ret = -EOPNOTSUPP;
2799
2800 bio_put(bio);
2801 return ret;
2802}
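
/*
 * Editor's sketch: reading one block synchronously via submit_bh().  This
 * is essentially what __bread() boils down to; it is shown here only to
 * make the lock/ref/completion protocol explicit.
 */
#if 0
static int example_read_block_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* end_buffer_read_sync() drops it */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif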
2803
2804/**
2805 * ll_rw_block: low-level access to block devices (DEPRECATED)
2806 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2807 * @nr: number of &struct buffer_heads in the array
2808 * @bhs: array of pointers to &struct buffer_head
2809 *
2810 * ll_rw_block() takes an array of pointers to &struct buffer_heads,
2811 * and requests an I/O operation on them, either a %READ or a %WRITE.
2812 * The third %READA option is described in the documentation for
2813 * generic_make_request() which ll_rw_block() calls.
2814 *
2815 * This function drops any buffer that it cannot get a lock on (with the
2816 * BH_Lock state bit), any buffer that appears to be clean when doing a
2817 * write request, and any buffer that appears to be up-to-date when doing a
2818 * read request. Further, it marks as clean the buffers that are processed for
2819 * writing (the buffer cache won't assume that they are actually clean until
2820 * the buffer gets unlocked).
2821 *
2822 * ll_rw_block sets b_end_io to a simple completion handler that marks
2823 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2824 * any waiters.
2825 *
2826 * All of the buffers must be for the same device, and their sizes must
2827 * be a multiple of the current approved block size for the device.
2828 */
2829void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2830{
2831 int i;
2832
2833 for (i = 0; i < nr; i++) {
2834 struct buffer_head *bh = bhs[i];
2835
2836 if (test_set_buffer_locked(bh))
2837 continue;
2838
2839 get_bh(bh);
2840 if (rw == WRITE) {
2841			if (test_clear_buffer_dirty(bh)) {
2842				bh->b_end_io = end_buffer_write_sync;
2843				submit_bh(WRITE, bh);
2844 continue;
2845 }
2846 } else {
2847			if (!buffer_uptodate(bh)) {
2848				bh->b_end_io = end_buffer_read_sync;
2849				submit_bh(rw, bh);
2850 continue;
2851 }
2852 }
2853 unlock_buffer(bh);
2854 put_bh(bh);
2855 }
2856}
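
/*
 * Editor's sketch of typical (if deprecated) usage: kick off reads on a
 * batch of buffers, then wait for the ones we need.  Buffers that were
 * already locked or uptodate are silently skipped by ll_rw_block().
 */
#if 0
static int example_read_batch(struct buffer_head *bhs[], int nr)
{
	int i;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}
#endif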
2857
2858/*
2859 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2860 * and then start new I/O and then wait upon it. The caller must have a ref on
2861 * the buffer_head.
2862 */
2863int sync_dirty_buffer(struct buffer_head *bh)
2864{
2865 int ret = 0;
2866
2867 WARN_ON(atomic_read(&bh->b_count) < 1);
2868 lock_buffer(bh);
2869 if (test_clear_buffer_dirty(bh)) {
2870 get_bh(bh);
2871 bh->b_end_io = end_buffer_write_sync;
2872 ret = submit_bh(WRITE, bh);
2873 wait_on_buffer(bh);
2874 if (buffer_eopnotsupp(bh)) {
2875 clear_buffer_eopnotsupp(bh);
2876 ret = -EOPNOTSUPP;
2877 }
2878 if (!ret && !buffer_uptodate(bh))
2879 ret = -EIO;
2880 } else {
2881 unlock_buffer(bh);
2882 }
2883 return ret;
2884}
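
/*
 * Editor's sketch of the usual data-integrity pattern: modify a metadata
 * block through the blockdev mapping, then force that one block to disk.
 * Reading the "superblock" from block 1 is, of course, hypothetical.
 */
#if 0
static int example_update_super(struct super_block *sb)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, 1);		/* hypothetical on-disk location */
	if (!bh)
		return -EIO;
	/* ... modify bh->b_data here ... */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* waits for old I/O, then writes */
	brelse(bh);
	return err;
}
#endif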
2885
2886/*
2887 * try_to_free_buffers() checks if all the buffers on this particular page
2888 * are unused, and releases them if so.
2889 *
2890 * Exclusion against try_to_free_buffers may be obtained by either
2891 * locking the page or by holding its mapping's private_lock.
2892 *
2893 * If the page is dirty but all the buffers are clean then we need to
2894 * be sure to mark the page clean as well. This is because the page
2895 * may be against a block device, and a later reattachment of buffers
2896 * to a dirty page will set *all* buffers dirty. Which would corrupt
2897 * filesystem data on the same device.
2898 *
2899 * The same applies to regular filesystem pages: if all the buffers are
2900 * clean then we set the page clean and proceed. To do that, we require
2901 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2902 * private_lock.
2903 *
2904 * try_to_free_buffers() is non-blocking.
2905 */
2906static inline int buffer_busy(struct buffer_head *bh)
2907{
2908 return atomic_read(&bh->b_count) |
2909 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2910}
2911
2912static int
2913drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2914{
2915 struct buffer_head *head = page_buffers(page);
2916 struct buffer_head *bh;
2917
2918 bh = head;
2919 do {
2920 if (buffer_write_io_error(bh))
2921 set_bit(AS_EIO, &page->mapping->flags);
2922 if (buffer_busy(bh))
2923 goto failed;
2924 bh = bh->b_this_page;
2925 } while (bh != head);
2926
2927 do {
2928 struct buffer_head *next = bh->b_this_page;
2929
2930 if (!list_empty(&bh->b_assoc_buffers))
2931 __remove_assoc_queue(bh);
2932 bh = next;
2933 } while (bh != head);
2934 *buffers_to_free = head;
2935 __clear_page_buffers(page);
2936 return 1;
2937failed:
2938 return 0;
2939}
2940
2941int try_to_free_buffers(struct page *page)
2942{
2943 struct address_space * const mapping = page->mapping;
2944 struct buffer_head *buffers_to_free = NULL;
2945 int ret = 0;
2946
2947 BUG_ON(!PageLocked(page));
2948 if (PageWriteback(page))
2949 return 0;
2950
2951 if (mapping == NULL) { /* can this still happen? */
2952 ret = drop_buffers(page, &buffers_to_free);
2953 goto out;
2954 }
2955
2956 spin_lock(&mapping->private_lock);
2957 ret = drop_buffers(page, &buffers_to_free);
2958 if (ret) {
2959 /*
2960 * If the filesystem writes its buffers by hand (eg ext3)
2961 * then we can have clean buffers against a dirty page. We
2962 * clean the page here; otherwise later reattachment of buffers
2963 * could encounter a non-uptodate page, which is unresolvable.
2964 * This only applies in the rare case where try_to_free_buffers
2965 * succeeds but the page is not freed.
2966 */
2967 clear_page_dirty(page);
2968 }
2969 spin_unlock(&mapping->private_lock);
2970out:
2971 if (buffers_to_free) {
2972 struct buffer_head *bh = buffers_to_free;
2973
2974 do {
2975 struct buffer_head *next = bh->b_this_page;
2976 free_buffer_head(bh);
2977 bh = next;
2978 } while (bh != buffers_to_free);
2979 }
2980 return ret;
2981}
2982EXPORT_SYMBOL(try_to_free_buffers);
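
/*
 * Editor's sketch: a filesystem with no private buffer state can point
 * ->releasepage straight at a try_to_free_buffers() wrapper; one with
 * journalled buffers (e.g. ext3) must first check its own pins.  The
 * examplefs_page_pinned() test is hypothetical.
 */
#if 0
static int examplefs_releasepage(struct page *page, int gfp_mask)
{
	if (examplefs_page_pinned(page))	/* hypothetical */
		return 0;
	return try_to_free_buffers(page);
}
#endif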
2983
2984int block_sync_page(struct page *page)
2985{
2986 struct address_space *mapping;
2987
2988 smp_mb();
2989 mapping = page_mapping(page);
2990 if (mapping)
2991 blk_run_backing_dev(mapping->backing_dev_info, page);
2992 return 0;
2993}
2994
2995/*
2996 * There are no bdflush tunables left. But distributions are
2997 * still running obsolete flush daemons, so we terminate them here.
2998 *
2999 * Use of bdflush() is deprecated and will be removed in a future kernel.
3000 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3001 */
3002asmlinkage long sys_bdflush(int func, long data)
3003{
3004 static int msg_count;
3005
3006 if (!capable(CAP_SYS_ADMIN))
3007 return -EPERM;
3008
3009 if (msg_count < 5) {
3010 msg_count++;
3011 printk(KERN_INFO
3012 "warning: process `%s' used the obsolete bdflush"
3013 " system call\n", current->comm);
3014 printk(KERN_INFO "Fix your initscripts?\n");
3015 }
3016
3017 if (func == 1)
3018 do_exit(0);
3019 return 0;
3020}
3021
3022/*
3023 * Buffer-head allocation
3024 */
3025static kmem_cache_t *bh_cachep;
3026
3027/*
3028 * Once the number of bh's in the machine exceeds this level, we start
3029 * stripping them in writeback.
3030 */
3031static int max_buffer_heads;
3032
3033int buffer_heads_over_limit;
3034
3035struct bh_accounting {
3036 int nr; /* Number of live bh's */
3037 int ratelimit; /* Limit cacheline bouncing */
3038};
3039
3040static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3041
3042static void recalc_bh_state(void)
3043{
3044 int i;
3045 int tot = 0;
3046
3047 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3048 return;
3049 __get_cpu_var(bh_accounting).ratelimit = 0;
3050 for_each_cpu(i)
3051 tot += per_cpu(bh_accounting, i).nr;
3052 buffer_heads_over_limit = (tot > max_buffer_heads);
3053}
3054
3055struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
3056{
3057 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3058 if (ret) {
3059 preempt_disable();
3060 __get_cpu_var(bh_accounting).nr++;
3061 recalc_bh_state();
3062 preempt_enable();
3063 }
3064 return ret;
3065}
3066EXPORT_SYMBOL(alloc_buffer_head);
3067
3068void free_buffer_head(struct buffer_head *bh)
3069{
3070 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3071 kmem_cache_free(bh_cachep, bh);
3072 preempt_disable();
3073 __get_cpu_var(bh_accounting).nr--;
3074 recalc_bh_state();
3075 preempt_enable();
3076}
3077EXPORT_SYMBOL(free_buffer_head);
3078
3079static void
3080init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3081{
3082 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3083 SLAB_CTOR_CONSTRUCTOR) {
3084 struct buffer_head * bh = (struct buffer_head *)data;
3085
3086 memset(bh, 0, sizeof(*bh));
3087 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3088 }
3089}
3090
3091#ifdef CONFIG_HOTPLUG_CPU
3092static void buffer_exit_cpu(int cpu)
3093{
3094 int i;
3095 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3096
3097 for (i = 0; i < BH_LRU_SIZE; i++) {
3098 brelse(b->bhs[i]);
3099 b->bhs[i] = NULL;
3100 }
3101}
3102
3103static int buffer_cpu_notify(struct notifier_block *self,
3104 unsigned long action, void *hcpu)
3105{
3106 if (action == CPU_DEAD)
3107 buffer_exit_cpu((unsigned long)hcpu);
3108 return NOTIFY_OK;
3109}
3110#endif /* CONFIG_HOTPLUG_CPU */
3111
3112void __init buffer_init(void)
3113{
3114 int nrpages;
3115
3116 bh_cachep = kmem_cache_create("buffer_head",
3117 sizeof(struct buffer_head), 0,
3118 SLAB_PANIC, init_buffer_head, NULL);
3119
3120 /*
3121 * Limit the bh occupancy to 10% of ZONE_NORMAL
3122 */
3123 nrpages = (nr_free_buffer_pages() * 10) / 100;
3124 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3125 hotcpu_notifier(buffer_cpu_notify, 0);
3126}
3127
3128EXPORT_SYMBOL(__bforget);
3129EXPORT_SYMBOL(__brelse);
3130EXPORT_SYMBOL(__wait_on_buffer);
3131EXPORT_SYMBOL(block_commit_write);
3132EXPORT_SYMBOL(block_prepare_write);
3133EXPORT_SYMBOL(block_read_full_page);
3134EXPORT_SYMBOL(block_sync_page);
3135EXPORT_SYMBOL(block_truncate_page);
3136EXPORT_SYMBOL(block_write_full_page);
3137EXPORT_SYMBOL(cont_prepare_write);
3138EXPORT_SYMBOL(end_buffer_async_write);
3139EXPORT_SYMBOL(end_buffer_read_sync);
3140EXPORT_SYMBOL(end_buffer_write_sync);
3141EXPORT_SYMBOL(file_fsync);
3142EXPORT_SYMBOL(fsync_bdev);
3143EXPORT_SYMBOL(generic_block_bmap);
3144EXPORT_SYMBOL(generic_commit_write);
3145EXPORT_SYMBOL(generic_cont_expand);
3146EXPORT_SYMBOL(init_buffer);
3147EXPORT_SYMBOL(invalidate_bdev);
3148EXPORT_SYMBOL(ll_rw_block);
3149EXPORT_SYMBOL(mark_buffer_dirty);
3150EXPORT_SYMBOL(submit_bh);
3151EXPORT_SYMBOL(sync_dirty_buffer);
3152EXPORT_SYMBOL(unlock_buffer);