/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}

static int sync_buffer(void *word)
{
        struct block_device *bd;
        struct buffer_head *bh
                = container_of(word, struct buffer_head, b_state);

        smp_mb();
        bd = bh->b_bdev;
        if (bd)
                blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
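
/*
 * Usage sketch (editorial addition, not part of the original file): most
 * callers use the wrappers from <linux/buffer_head.h>, which fall through
 * to __lock_buffer()/__wait_on_buffer() above only when they must sleep:
 *
 *      lock_buffer(bh);                (may block in __lock_buffer())
 *      ... examine or modify the buffer ...
 *      unlock_buffer(bh);              (wakes waiters via wake_up_bit())
 *
 * or, to wait for someone else's I/O to finish without taking the lock:
 *
 *      wait_on_buffer(bh);
 *      if (!buffer_uptodate(bh))
 *              ... handle the I/O error ...
 */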

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}

static int quiet_error(struct buffer_head *bh)
{
        if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
                return 0;
        return 1;
}

static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        int ret = 0;

        if (bdev)
                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
        return ret;
}
EXPORT_SYMBOL(sync_blockdev);
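
/*
 * Usage sketch (editorial addition): a driver or filesystem that wants all
 * dirty pagecache for a device on disk, without taking the superblock lock,
 * can call sync_blockdev() directly:
 *
 *      err = sync_blockdev(bdev);
 *      if (err)
 *              ... writeback failed, typically with -EIO ...
 */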
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182/*
183 * Write out and wait upon all dirty data associated with this
184 * device. Filesystem data as well as the underlying block
185 * device. Takes the superblock lock.
186 */
187int fsync_bdev(struct block_device *bdev)
188{
189 struct super_block *sb = get_super(bdev);
190 if (sb) {
191 int res = fsync_super(sb);
192 drop_super(sb);
193 return res;
194 }
195 return sync_blockdev(bdev);
196}
197
198/**
199 * freeze_bdev -- lock a filesystem and force it into a consistent state
200 * @bdev: blockdevice to lock
201 *
David Chinnerf73ca1b2007-01-10 23:15:41 -0800202 * This takes the block device bd_mount_sem to make sure no new mounts
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 * happen on bdev until thaw_bdev() is called.
204 * If a superblock is found on this device, we take the s_umount semaphore
205 * on it to make sure nobody unmounts until the snapshot creation is done.
206 */
207struct super_block *freeze_bdev(struct block_device *bdev)
208{
209 struct super_block *sb;
210
David Chinnerf73ca1b2007-01-10 23:15:41 -0800211 down(&bdev->bd_mount_sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 sb = get_super(bdev);
213 if (sb && !(sb->s_flags & MS_RDONLY)) {
214 sb->s_frozen = SB_FREEZE_WRITE;
akpm@osdl.orgd59dd462005-05-01 08:58:47 -0700215 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216
OGAWA Hirofumid25b9a12006-03-25 03:07:44 -0800217 __fsync_super(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218
219 sb->s_frozen = SB_FREEZE_TRANS;
akpm@osdl.orgd59dd462005-05-01 08:58:47 -0700220 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221
222 sync_blockdev(sb->s_bdev);
223
224 if (sb->s_op->write_super_lockfs)
225 sb->s_op->write_super_lockfs(sb);
226 }
227
228 sync_blockdev(bdev);
229 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
230}
231EXPORT_SYMBOL(freeze_bdev);
232
233/**
234 * thaw_bdev -- unlock filesystem
235 * @bdev: blockdevice to unlock
236 * @sb: associated superblock
237 *
238 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
239 */
240void thaw_bdev(struct block_device *bdev, struct super_block *sb)
241{
242 if (sb) {
243 BUG_ON(sb->s_bdev != bdev);
244
245 if (sb->s_op->unlockfs)
246 sb->s_op->unlockfs(sb);
247 sb->s_frozen = SB_UNFROZEN;
akpm@osdl.orgd59dd462005-05-01 08:58:47 -0700248 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249 wake_up(&sb->s_wait_unfrozen);
250 drop_super(sb);
251 }
252
David Chinnerf73ca1b2007-01-10 23:15:41 -0800253 up(&bdev->bd_mount_sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254}
255EXPORT_SYMBOL(thaw_bdev);
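
/*
 * Usage sketch (editorial addition): a snapshot implementation brackets its
 * work with the pair above.  freeze_bdev() may return NULL when no
 * superblock sits on the device; thaw_bdev() accepts that NULL:
 *
 *      struct super_block *sb = freeze_bdev(bdev);
 *      ... take the device-level snapshot ...
 *      thaw_bdev(bdev, sb);
 */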

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty
   buffers we could generate corruption also on the next media inserted.
   Thus a parameter is necessary to handle this case in the safest way
   possible (trying not to corrupt the newly inserted disk with data
   belonging to the old, now-corrupted disk).  Also, for the ramdisk the
   natural way to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        invalidate_mapping_pages(mapping, 0, -1);
}
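
/*
 * Usage sketch (editorial addition): invalidate_bdev() is the "drop clean
 * cached pages" half of the BLKFLSBUF-style sequence described above -
 * write everything back first, then invalidate:
 *
 *      sync_blockdev(bdev);
 *      invalidate_bdev(bdev);
 */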

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone *zone;
        int nid;

        wakeup_pdflush(1024);
        yield();

        for_each_online_node(nid) {
                (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                                gfp_zone(GFP_NOFS), NULL,
                                                &zone);
                if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                                GFP_NOFS);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (!quiet_error(bh))
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against any
 * of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_write;
        set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
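
/*
 * Usage sketch (editorial addition): a simple fsync method built on the
 * helper above, roughly the shape of ext2_sync_file() in fs/ext2/fsync.c.
 * example_fsync is a hypothetical name:
 *
 *      int example_fsync(struct file *file, struct dentry *dentry,
 *                        int datasync)
 *      {
 *              struct inode *inode = dentry->d_inode;
 *              int err;
 *
 *              err = sync_mapping_buffers(inode->i_mapping);
 *              if (!(inode->i_state & I_DIRTY))
 *                      return err;
 *              ... also write the inode itself, unless datasync and only
 *                  timestamps changed ...
 *              return err;
 *      }
 */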

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
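
/*
 * Usage sketch (editorial addition): a filesystem dirtying a metadata
 * buffer that a later fsync() of @inode must flush (an ext2-style indirect
 * block, say) marks it against the owning inode, not just its page:
 *
 *      bh = sb_getblk(sb, indirect_block);
 *      ... modify bh->b_data ...
 *      mark_buffer_dirty_inode(bh, inode);
 *      brelse(bh);
 *
 * sync_mapping_buffers(inode->i_mapping) will then find the buffer on
 * ->private_list and write it back.
 */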

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
{
        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        if (TestSetPageDirty(page))
                return 0;

        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));

                if (mapping_cap_account_dirty(mapping)) {
                        __inc_zone_page_state(page, NR_FILE_DIRTY);
                        __inc_bdi_stat(mapping->backing_dev_info,
                                        BDI_RECLAIMABLE);
                        task_io_account_write(PAGE_CACHE_SIZE);
                }
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        spin_unlock(&mapping->private_lock);

        return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
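
/*
 * Usage note (editorial addition): the VM already falls back to this
 * function when an address_space supplies no ->set_page_dirty method
 * (see set_page_dirty() in mm/page-writeback.c), and a filesystem can
 * also name it explicitly in its aops; example_aops is a placeholder:
 *
 *      static const struct address_space_operations example_aops = {
 *              .set_page_dirty = __set_page_dirty_buffers,
 *              ...
 *      };
 */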

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;

        INIT_LIST_HEAD(&tmp);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * ll_rw_block() actually writes the current
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
                                ll_rw_block(SWRITE_SYNC, 1, &bh);
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_private = NULL;
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                init_buffer(bh, NULL, NULL);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;

        page = find_or_create_page(inode->i_mapping, index,
                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
                return NULL;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        init_page_buffers(page, bdev, block, size);
                        return page;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        init_page_buffers(page, bdev, block, size);
        spin_unlock(&inode->i_mapping->private_lock);
        return page;

failed:
        BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
        struct page *page;
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index.  (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
                        __func__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }
        block = index << sizebits;
        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
                return 0;
        unlock_page(page);
        page_cache_release(page);
        return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "hardsect size: %d\n",
                                        bdev_hardsect_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head * bh;
                int ret;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                ret = grow_buffers(bdev, block, size);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh))
                __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}
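
/*
 * Usage sketch (editorial addition): the common modify-and-dirty pattern
 * for metadata read through the buffer cache:
 *
 *      struct buffer_head *bh = sb_bread(sb, blocknr);
 *      if (!bh)
 *              return -EIO;
 *      lock_buffer(bh);
 *      ... modify bh->b_data ...
 *      mark_buffer_dirty(bh);
 *      unlock_buffer(bh);
 *      brelse(bh);
 *
 * Writeback then picks the buffer up via its (now dirty) page.
 */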

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;

                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     8

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = NULL;
        struct bh_lru *lru;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        if (lru->bhs[0] != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;

                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
                        struct buffer_head *bh2 = lru->bhs[in];

                        if (bh2 == bh) {
                                __brelse(bh2);
                        } else {
                                if (out >= BH_LRU_SIZE) {
                                        BUG_ON(evictee != NULL);
                                        evictee = bh2;
                                } else {
                                        bhs[out++] = bh2;
                                }
                        }
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
                memcpy(lru->bhs, bhs, sizeof(bhs));
        }
        bh_lru_unlock();

        if (evictee)
                __brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        struct bh_lru *lru;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = lru->bhs[i];

                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        lru->bhs[i] = lru->bhs[i - 1];
                                        i--;
                                }
                                lru->bhs[0] = bh;
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        }
        if (bh)
                touch_buffer(bh);
        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size);
        return bh;
}
EXPORT_SYMBOL(__getblk);
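
/*
 * Usage note (editorial addition): callers almost always go through the
 * sb_getblk() wrapper from <linux/buffer_head.h>, which supplies the
 * superblock's device and block size:
 *
 *      bh = sb_getblk(sb, blocknr);
 *      ... fill bh->b_data, set_buffer_uptodate(bh), mark_buffer_dirty(bh) ...
 *      brelse(bh);
 *
 * The returned buffer is mapped with an elevated refcount, but it is not
 * guaranteed to be uptodate - use __bread()/sb_bread() when the on-disk
 * contents are needed.
 */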

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
                ll_rw_block(READA, 1, &bh);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread);
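
/*
 * Usage sketch (editorial addition): read-ahead then read, via the
 * superblock wrappers sb_breadahead()/sb_bread() from <linux/buffer_head.h>:
 *
 *      sb_breadahead(sb, blocknr + 1);         (asynchronous, best-effort)
 *      bh = sb_bread(sb, blocknr);
 *      if (!bh)
 *              return -EIO;                    (block was unreadable)
 *      ... use bh->b_data ...
 *      brelse(bh);
 */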

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
        struct bh_lru *b = &get_cpu_var(bh_lrus);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
        put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
        on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
{
        bh->b_page = page;
        BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
                 */
                bh->b_data = (char *)(0 + offset);
        else
                bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
        unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                goto out;

        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                /*
                 * is this block fully invalidated?
                 */
                if (offset <= curr_off)
                        discard_buffer(bh);
                curr_off = next_off;
                bh = next;
        } while (bh != head);

        /*
         * We release buffers only if the entire page is being invalidated.
         * The get_block cached value has been unconditionally invalidated,
         * so real IO is not possible anymore.
         */
        if (offset == 0)
                try_to_release_page(page, 0);
out:
        return;
}
EXPORT_SYMBOL(block_invalidatepage);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
                        unsigned long blocksize, unsigned long b_state)
{
        struct buffer_head *bh, *head, *tail;

        head = alloc_page_buffers(page, blocksize, 1);
        bh = head;
        do {
                bh->b_state |= b_state;
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;

        spin_lock(&page->mapping->private_lock);
        if (PageUptodate(page) || PageDirty(page)) {
                bh = head;
                do {
                        if (PageDirty(page))
                                set_buffer_dirty(bh);
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        attach_page_buffers(page, head);
        spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
1594
1595/*
1596 * We are taking a block for data and we don't want any output from any
1597 * buffer-cache aliases from the moment this function returns until
1598 * the moment something explicitly marks the buffer dirty (hopefully
1599 * that will not happen until we free that block ;-)
1600 * We don't even need to mark it not-uptodate - nobody can expect
1601 * anything from a newly allocated buffer anyway. We used to use
1602 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1603 * don't want to mark the alias unmapped, for example - it would confuse
1604 * anyone who might pick it with bread() afterwards...
1605 *
1606 * Also.. Note that bforget() doesn't lock the buffer. So there can
1607 * be writeout I/O going on against recently-freed buffers. We don't
1608 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1609 * only if we really need to. That happens here.
1610 */
1611void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1612{
1613 struct buffer_head *old_bh;
1614
1615 might_sleep();
1616
Coywolf Qi Hunt385fd4c2005-11-07 00:59:39 -08001617 old_bh = __find_get_block_slow(bdev, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 if (old_bh) {
1619 clear_buffer_dirty(old_bh);
1620 wait_on_buffer(old_bh);
1621 clear_buffer_req(old_bh);
1622 __brelse(old_bh);
1623 }
1624}
1625EXPORT_SYMBOL(unmap_underlying_metadata);
1626
1627/*
1628 * NOTE! All mapped/uptodate combinations are valid:
1629 *
1630 * Mapped Uptodate Meaning
1631 *
1632 * No No "unknown" - must do get_block()
1633 * No Yes "hole" - zero-filled
1634 * Yes No "allocated" - allocated on disk, not read in
1635 * Yes Yes "valid" - allocated and up-to-date in memory.
1636 *
1637 * "Dirty" is valid only with the last case (mapped+uptodate).
1638 */
1639
1640/*
1641 * While block_write_full_page is writing back the dirty buffers under
1642 * the page lock, whoever dirtied the buffers may decide to clean them
1643 * again at any time. We handle that by only looking at the buffer
1644 * state inside lock_buffer().
1645 *
1646 * If block_write_full_page() is called for regular writeback
1647 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1648 * locked buffer. This only can happen if someone has written the buffer
1649 * directly, with submit_bh(). At the address_space level PageWriteback
1650 * prevents this contention from occurring.
1651 */
1652static int __block_write_full_page(struct inode *inode, struct page *page,
1653 get_block_t *get_block, struct writeback_control *wbc)
1654{
1655 int err;
1656 sector_t block;
1657 sector_t last_block;
Andrew Mortonf0fbd5f2005-05-05 16:15:48 -07001658 struct buffer_head *bh, *head;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001659 const unsigned blocksize = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 int nr_underway = 0;
1661
1662 BUG_ON(!PageLocked(page));
1663
1664 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1665
1666 if (!page_has_buffers(page)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001667 create_empty_buffers(page, blocksize,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 (1 << BH_Dirty)|(1 << BH_Uptodate));
1669 }
1670
1671 /*
1672 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1673 * here, and the (potentially unmapped) buffers may become dirty at
1674 * any time. If a buffer becomes dirty here after we've inspected it
1675 * then we just miss that fact, and the page stays dirty.
1676 *
1677 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1678 * handle that here by just cleaning them.
1679 */
1680
Andrew Morton54b21a72006-01-08 01:03:05 -08001681 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 head = page_buffers(page);
1683 bh = head;
1684
1685 /*
1686 * Get all the dirty buffers mapped to disk addresses and
1687 * handle any aliases from the underlying blockdev's mapping.
1688 */
1689 do {
1690 if (block > last_block) {
1691 /*
1692 * mapped buffers outside i_size will occur, because
1693 * this page can be outside i_size when there is a
1694 * truncate in progress.
1695 */
1696 /*
1697 * The buffer was zeroed by block_write_full_page()
1698 */
1699 clear_buffer_dirty(bh);
1700 set_buffer_uptodate(bh);
Alex Tomas29a814d2008-07-11 19:27:31 -04001701 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1702 buffer_dirty(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001703 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 err = get_block(inode, block, bh, 1);
1705 if (err)
1706 goto recover;
Alex Tomas29a814d2008-07-11 19:27:31 -04001707 clear_buffer_delay(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 if (buffer_new(bh)) {
1709 /* blockdev mappings never come here */
1710 clear_buffer_new(bh);
1711 unmap_underlying_metadata(bh->b_bdev,
1712 bh->b_blocknr);
1713 }
1714 }
1715 bh = bh->b_this_page;
1716 block++;
1717 } while (bh != head);
1718
1719 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 if (!buffer_mapped(bh))
1721 continue;
1722 /*
1723 * If it's a fully non-blocking write attempt and we cannot
1724 * lock the buffer then redirty the page. Note that this can
1725 * potentially cause a busy-wait loop from pdflush and kswapd
1726 * activity, but those code paths have their own higher-level
1727 * throttling.
1728 */
1729 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1730 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02001731 } else if (!trylock_buffer(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 redirty_page_for_writepage(wbc, page);
1733 continue;
1734 }
1735 if (test_clear_buffer_dirty(bh)) {
1736 mark_buffer_async_write(bh);
1737 } else {
1738 unlock_buffer(bh);
1739 }
1740 } while ((bh = bh->b_this_page) != head);
1741
1742 /*
1743 * The page and its buffers are protected by PageWriteback(), so we can
1744 * drop the bh refcounts early.
1745 */
1746 BUG_ON(PageWriteback(page));
1747 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
1749 do {
1750 struct buffer_head *next = bh->b_this_page;
1751 if (buffer_async_write(bh)) {
1752 submit_bh(WRITE, bh);
1753 nr_underway++;
1754 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 bh = next;
1756 } while (bh != head);
Andrew Morton05937ba2005-05-05 16:15:47 -07001757 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758
1759 err = 0;
1760done:
1761 if (nr_underway == 0) {
1762 /*
1763 * The page was marked dirty, but the buffers were
1764 * clean. Someone wrote them back by hand with
1765 * ll_rw_block/submit_bh. A rare case.
1766 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 end_page_writeback(page);
Nick Piggin3d67f2d2007-05-06 14:49:05 -07001768
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 /*
1770 * The page and buffer_heads can be released at any time from
1771 * here on.
1772 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 }
1774 return err;
1775
1776recover:
1777 /*
1778 * ENOSPC, or some other error. We may already have added some
1779 * blocks to the file, so we need to write these out to avoid
1780 * exposing stale data.
1781 * The page is currently locked and not marked for writeback
1782 */
1783 bh = head;
1784 /* Recovery: lock and submit the mapped buffers */
1785 do {
Alex Tomas29a814d2008-07-11 19:27:31 -04001786 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1787 !buffer_delay(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 lock_buffer(bh);
1789 mark_buffer_async_write(bh);
1790 } else {
1791 /*
1792 * The buffer may have been set dirty during
1793 * attachment to a dirty page.
1794 */
1795 clear_buffer_dirty(bh);
1796 }
1797 } while ((bh = bh->b_this_page) != head);
1798 SetPageError(page);
1799 BUG_ON(PageWriteback(page));
Andrew Morton7e4c3692007-05-08 00:23:27 -07001800 mapping_set_error(page->mapping, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 do {
1803 struct buffer_head *next = bh->b_this_page;
1804 if (buffer_async_write(bh)) {
1805 clear_buffer_dirty(bh);
1806 submit_bh(WRITE, bh);
1807 nr_underway++;
1808 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 bh = next;
1810 } while (bh != head);
Nick Pigginffda9d32007-02-20 13:57:54 -08001811 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 goto done;
1813}
1814
Nick Pigginafddba42007-10-16 01:25:01 -07001815/*
1816 * If a page has any new buffers, zero them out here, and mark them uptodate
1817 * and dirty so they'll be written out (in order to prevent uninitialised
1818 * block data from leaking). And clear the new bit.
1819 */
1820void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1821{
1822 unsigned int block_start, block_end;
1823 struct buffer_head *head, *bh;
1824
1825 BUG_ON(!PageLocked(page));
1826 if (!page_has_buffers(page))
1827 return;
1828
1829 bh = head = page_buffers(page);
1830 block_start = 0;
1831 do {
1832 block_end = block_start + bh->b_size;
1833
1834 if (buffer_new(bh)) {
1835 if (block_end > from && block_start < to) {
1836 if (!PageUptodate(page)) {
1837 unsigned start, size;
1838
1839 start = max(from, block_start);
1840 size = min(to, block_end) - start;
1841
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001842 zero_user(page, start, size);
Nick Pigginafddba42007-10-16 01:25:01 -07001843 set_buffer_uptodate(bh);
1844 }
1845
1846 clear_buffer_new(bh);
1847 mark_buffer_dirty(bh);
1848 }
1849 }
1850
1851 block_start = block_end;
1852 bh = bh->b_this_page;
1853 } while (bh != head);
1854}
1855EXPORT_SYMBOL(page_zero_new_buffers);
1856
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857static int __block_prepare_write(struct inode *inode, struct page *page,
1858 unsigned from, unsigned to, get_block_t *get_block)
1859{
1860 unsigned block_start, block_end;
1861 sector_t block;
1862 int err = 0;
1863 unsigned blocksize, bbits;
1864 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1865
1866 BUG_ON(!PageLocked(page));
1867 BUG_ON(from > PAGE_CACHE_SIZE);
1868 BUG_ON(to > PAGE_CACHE_SIZE);
1869 BUG_ON(from > to);
1870
1871 blocksize = 1 << inode->i_blkbits;
1872 if (!page_has_buffers(page))
1873 create_empty_buffers(page, blocksize, 0);
1874 head = page_buffers(page);
1875
1876 bbits = inode->i_blkbits;
1877 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1878
1879 for(bh = head, block_start = 0; bh != head || !block_start;
1880 block++, block_start=block_end, bh = bh->b_this_page) {
1881 block_end = block_start + blocksize;
1882 if (block_end <= from || block_start >= to) {
1883 if (PageUptodate(page)) {
1884 if (!buffer_uptodate(bh))
1885 set_buffer_uptodate(bh);
1886 }
1887 continue;
1888 }
1889 if (buffer_new(bh))
1890 clear_buffer_new(bh);
1891 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001892 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 err = get_block(inode, block, bh, 1);
1894 if (err)
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001895 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 if (buffer_new(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 unmap_underlying_metadata(bh->b_bdev,
1898 bh->b_blocknr);
1899 if (PageUptodate(page)) {
Nick Piggin637aff42007-10-16 01:25:00 -07001900 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 set_buffer_uptodate(bh);
Nick Piggin637aff42007-10-16 01:25:00 -07001902 mark_buffer_dirty(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 continue;
1904 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001905 if (block_end > to || block_start < from)
1906 zero_user_segments(page,
1907 to, block_end,
1908 block_start, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 continue;
1910 }
1911 }
1912 if (PageUptodate(page)) {
1913 if (!buffer_uptodate(bh))
1914 set_buffer_uptodate(bh);
1915 continue;
1916 }
1917 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
David Chinner33a266d2007-02-12 00:51:41 -08001918 !buffer_unwritten(bh) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 (block_start < from || block_end > to)) {
1920 ll_rw_block(READ, 1, &bh);
1921 *wait_bh++=bh;
1922 }
1923 }
1924 /*
1925 * If we issued read requests - let them complete.
1926 */
1927 while(wait_bh > wait) {
1928 wait_on_buffer(*--wait_bh);
1929 if (!buffer_uptodate(*wait_bh))
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001930 err = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 }
Nick Pigginafddba42007-10-16 01:25:01 -07001932 if (unlikely(err))
1933 page_zero_new_buffers(page, from, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 return err;
1935}
1936
1937static int __block_commit_write(struct inode *inode, struct page *page,
1938 unsigned from, unsigned to)
1939{
1940 unsigned block_start, block_end;
1941 int partial = 0;
1942 unsigned blocksize;
1943 struct buffer_head *bh, *head;
1944
1945 blocksize = 1 << inode->i_blkbits;
1946
1947 for(bh = head = page_buffers(page), block_start = 0;
1948 bh != head || !block_start;
1949 block_start=block_end, bh = bh->b_this_page) {
1950 block_end = block_start + blocksize;
1951 if (block_end <= from || block_start >= to) {
1952 if (!buffer_uptodate(bh))
1953 partial = 1;
1954 } else {
1955 set_buffer_uptodate(bh);
1956 mark_buffer_dirty(bh);
1957 }
Nick Pigginafddba42007-10-16 01:25:01 -07001958 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 }
1960
1961 /*
1962 * If this is a partial write which happened to make all buffers
1963 * uptodate then we can optimize away a bogus readpage() for
1964 * the next read(). Here we 'discover' whether the page went
1965 * uptodate as a result of this (potentially partial) write.
1966 */
1967 if (!partial)
1968 SetPageUptodate(page);
1969 return 0;
1970}
1971
1972/*
Nick Pigginafddba42007-10-16 01:25:01 -07001973 * block_write_begin takes care of the basic task of block allocation and
1974 * bringing partial write blocks uptodate first.
1975 *
1976 * If *pagep is not NULL, then block_write_begin uses the locked page
1977 * at *pagep rather than allocating its own. In this case, the page will
1978 * not be unlocked or deallocated on failure.
1979 */
1980int block_write_begin(struct file *file, struct address_space *mapping,
1981 loff_t pos, unsigned len, unsigned flags,
1982 struct page **pagep, void **fsdata,
1983 get_block_t *get_block)
1984{
1985 struct inode *inode = mapping->host;
1986 int status = 0;
1987 struct page *page;
1988 pgoff_t index;
1989 unsigned start, end;
1990 int ownpage = 0;
1991
1992 index = pos >> PAGE_CACHE_SHIFT;
1993 start = pos & (PAGE_CACHE_SIZE - 1);
1994 end = start + len;
1995
1996 page = *pagep;
1997 if (page == NULL) {
1998 ownpage = 1;
Nick Piggin54566b22009-01-04 12:00:53 -08001999 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Pigginafddba42007-10-16 01:25:01 -07002000 if (!page) {
2001 status = -ENOMEM;
2002 goto out;
2003 }
2004 *pagep = page;
2005 } else
2006 BUG_ON(!PageLocked(page));
2007
2008 status = __block_prepare_write(inode, page, start, end, get_block);
2009 if (unlikely(status)) {
2010 ClearPageUptodate(page);
2011
2012 if (ownpage) {
2013 unlock_page(page);
2014 page_cache_release(page);
2015 *pagep = NULL;
2016
2017 /*
2018 * prepare_write() may have instantiated a few blocks
2019 * outside i_size. Trim these off again. Don't need
2020 * i_size_read because we hold i_mutex.
2021 */
2022 if (pos + len > inode->i_size)
2023 vmtruncate(inode, inode->i_size);
2024 }
2025 goto out;
2026 }
2027
2028out:
2029 return status;
2030}
2031EXPORT_SYMBOL(block_write_begin);
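
/*
 * Illustrative sketch (not part of the original file): the common pattern
 * for a filesystem's ->write_begin is a thin wrapper that passes its own
 * get_block routine; *pagep is cleared so block_write_begin() allocates
 * and locks the page itself. myfs_write_begin and myfs_get_block are
 * hypothetical names.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return block_write_begin(file, mapping, pos, len, flags,
 *					 pagep, fsdata, myfs_get_block);
 *	}
 */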
2032
2033int block_write_end(struct file *file, struct address_space *mapping,
2034 loff_t pos, unsigned len, unsigned copied,
2035 struct page *page, void *fsdata)
2036{
2037 struct inode *inode = mapping->host;
2038 unsigned start;
2039
2040 start = pos & (PAGE_CACHE_SIZE - 1);
2041
2042 if (unlikely(copied < len)) {
2043 /*
2044 * The buffers that were written will now be uptodate, so we
2045 * don't have to worry about a readpage reading them and
2046 * overwriting a partial write. However if we have encountered
2047 * a short write and only partially written into a buffer, it
2048 * will not be marked uptodate, so a readpage might come in and
2049 * destroy our partial write.
2050 *
2051 * Do the simplest thing, and just treat any short write to a
2052 * non uptodate page as a zero-length write, and force the
2053 * caller to redo the whole thing.
2054 */
2055 if (!PageUptodate(page))
2056 copied = 0;
2057
2058 page_zero_new_buffers(page, start+copied, start+len);
2059 }
2060 flush_dcache_page(page);
2061
2062 /* This could be a short (even 0-length) commit */
2063 __block_commit_write(inode, page, start, start+copied);
2064
2065 return copied;
2066}
2067EXPORT_SYMBOL(block_write_end);
2068
2069int generic_write_end(struct file *file, struct address_space *mapping,
2070 loff_t pos, unsigned len, unsigned copied,
2071 struct page *page, void *fsdata)
2072{
2073 struct inode *inode = mapping->host;
Jan Karac7d206b2008-07-11 19:27:31 -04002074 int i_size_changed = 0;
Nick Pigginafddba42007-10-16 01:25:01 -07002075
2076 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2077
2078 /*
2079 * No need to use i_size_read() here, the i_size
2080 * cannot change under us because we hold i_mutex.
2081 *
2082 * But it's important to update i_size while still holding page lock:
2083 * page writeout could otherwise come in and zero beyond i_size.
2084 */
2085 if (pos+copied > inode->i_size) {
2086 i_size_write(inode, pos+copied);
Jan Karac7d206b2008-07-11 19:27:31 -04002087 i_size_changed = 1;
Nick Pigginafddba42007-10-16 01:25:01 -07002088 }
2089
2090 unlock_page(page);
2091 page_cache_release(page);
2092
Jan Karac7d206b2008-07-11 19:27:31 -04002093 /*
2094 * Don't mark the inode dirty under page lock. First, it unnecessarily
2095 * makes the holding time of page lock longer. Second, it forces lock
2096 * ordering of page lock and transaction start for journaling
2097 * filesystems.
2098 */
2099 if (i_size_changed)
2100 mark_inode_dirty(inode);
2101
Nick Pigginafddba42007-10-16 01:25:01 -07002102 return copied;
2103}
2104EXPORT_SYMBOL(generic_write_end);
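
/*
 * Illustrative sketch (not part of the original file): filesystems that
 * use block_write_begin() usually plug generic_write_end() straight into
 * their address_space_operations; all myfs_* entries are hypothetical.
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.write_begin	= myfs_write_begin,
 *		.write_end	= generic_write_end,
 *	};
 */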
2105
2106/*
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002107 * block_is_partially_uptodate checks whether buffers within a page are
2108 * uptodate or not.
2109 *
2110 * Returns true if all buffers which correspond to a file portion
2111 * we want to read are uptodate.
2112 */
2113int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2114 unsigned long from)
2115{
2116 struct inode *inode = page->mapping->host;
2117 unsigned block_start, block_end, blocksize;
2118 unsigned to;
2119 struct buffer_head *bh, *head;
2120 int ret = 1;
2121
2122 if (!page_has_buffers(page))
2123 return 0;
2124
2125 blocksize = 1 << inode->i_blkbits;
2126 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2127 to = from + to;
2128 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2129 return 0;
2130
2131 head = page_buffers(page);
2132 bh = head;
2133 block_start = 0;
2134 do {
2135 block_end = block_start + blocksize;
2136 if (block_end > from && block_start < to) {
2137 if (!buffer_uptodate(bh)) {
2138 ret = 0;
2139 break;
2140 }
2141 if (block_end >= to)
2142 break;
2143 }
2144 block_start = block_end;
2145 bh = bh->b_this_page;
2146 } while (bh != head);
2147
2148 return ret;
2149}
2150EXPORT_SYMBOL(block_is_partially_uptodate);
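
/*
 * Illustrative sketch (not part of the original file): this helper is
 * normally exported to the VFS read path by pointing the optional
 * ->is_partially_uptodate hook at it in the filesystem's
 * address_space_operations:
 *
 *	.is_partially_uptodate = block_is_partially_uptodate,
 */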
2151
2152/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 * Generic "read page" function for block devices that have the normal
2154 * get_block functionality. This is most of the block device filesystems.
2155 * Reads the page asynchronously --- the unlock_buffer() and
2156 * set/clear_buffer_uptodate() functions propagate buffer state into the
2157 * page struct once IO has completed.
2158 */
2159int block_read_full_page(struct page *page, get_block_t *get_block)
2160{
2161 struct inode *inode = page->mapping->host;
2162 sector_t iblock, lblock;
2163 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2164 unsigned int blocksize;
2165 int nr, i;
2166 int fully_mapped = 1;
2167
Matt Mackallcd7619d2005-05-01 08:59:01 -07002168 BUG_ON(!PageLocked(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 blocksize = 1 << inode->i_blkbits;
2170 if (!page_has_buffers(page))
2171 create_empty_buffers(page, blocksize, 0);
2172 head = page_buffers(page);
2173
2174 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2175 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2176 bh = head;
2177 nr = 0;
2178 i = 0;
2179
2180 do {
2181 if (buffer_uptodate(bh))
2182 continue;
2183
2184 if (!buffer_mapped(bh)) {
Andrew Mortonc64610b2005-05-16 21:53:49 -07002185 int err = 0;
2186
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 fully_mapped = 0;
2188 if (iblock < lblock) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002189 WARN_ON(bh->b_size != blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002190 err = get_block(inode, iblock, bh, 0);
2191 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 SetPageError(page);
2193 }
2194 if (!buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002195 zero_user(page, i * blocksize, blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002196 if (!err)
2197 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 continue;
2199 }
2200 /*
2201 * get_block() might have updated the buffer
2202 * synchronously
2203 */
2204 if (buffer_uptodate(bh))
2205 continue;
2206 }
2207 arr[nr++] = bh;
2208 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2209
2210 if (fully_mapped)
2211 SetPageMappedToDisk(page);
2212
2213 if (!nr) {
2214 /*
2215 * All buffers are uptodate - we can set the page uptodate
2216 * as well. But not if get_block() returned an error.
2217 */
2218 if (!PageError(page))
2219 SetPageUptodate(page);
2220 unlock_page(page);
2221 return 0;
2222 }
2223
2224 /* Stage two: lock the buffers */
2225 for (i = 0; i < nr; i++) {
2226 bh = arr[i];
2227 lock_buffer(bh);
2228 mark_buffer_async_read(bh);
2229 }
2230
2231 /*
2232 * Stage 3: start the IO. Check for uptodateness
2233 * inside the buffer lock in case another process reading
2234 * the underlying blockdev brought it uptodate (the sct fix).
2235 */
2236 for (i = 0; i < nr; i++) {
2237 bh = arr[i];
2238 if (buffer_uptodate(bh))
2239 end_buffer_async_read(bh, 1);
2240 else
2241 submit_bh(READ, bh);
2242 }
2243 return 0;
2244}
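
/*
 * Illustrative sketch (not part of the original file): a typical
 * ->readpage implementation is a one-line wrapper around this function;
 * myfs_get_block is a hypothetical name for the filesystem's block
 * mapping routine.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 */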
2245
2246/* utility function for filesystems that need to do work on expanding
Nick Piggin89e10782007-10-16 01:25:07 -07002247 * truncates. Uses filesystem pagecache writes to allow the filesystem to
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 * deal with the hole.
2249 */
Nick Piggin89e10782007-10-16 01:25:07 -07002250int generic_cont_expand_simple(struct inode *inode, loff_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251{
2252 struct address_space *mapping = inode->i_mapping;
2253 struct page *page;
Nick Piggin89e10782007-10-16 01:25:07 -07002254 void *fsdata;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002255 unsigned long limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 int err;
2257
2258 err = -EFBIG;
2259 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2260 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2261 send_sig(SIGXFSZ, current, 0);
2262 goto out;
2263 }
2264 if (size > inode->i_sb->s_maxbytes)
2265 goto out;
2266
Nick Piggin89e10782007-10-16 01:25:07 -07002267 err = pagecache_write_begin(NULL, mapping, size, 0,
2268 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2269 &page, &fsdata);
2270 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 goto out;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002272
Nick Piggin89e10782007-10-16 01:25:07 -07002273 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2274 BUG_ON(err > 0);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002275
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276out:
2277 return err;
2278}
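
/*
 * Illustrative sketch (not part of the original file): a filesystem would
 * call this from its ->setattr path when an ATTR_SIZE change grows the
 * file, before updating its on-disk metadata:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */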
2279
Adrian Bunkf1e3af72008-04-29 00:59:01 -07002280static int cont_expand_zero(struct file *file, struct address_space *mapping,
2281 loff_t pos, loff_t *bytes)
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002282{
Nick Piggin89e10782007-10-16 01:25:07 -07002283 struct inode *inode = mapping->host;
2284 unsigned blocksize = 1 << inode->i_blkbits;
2285 struct page *page;
2286 void *fsdata;
2287 pgoff_t index, curidx;
2288 loff_t curpos;
2289 unsigned zerofrom, offset, len;
2290 int err = 0;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002291
Nick Piggin89e10782007-10-16 01:25:07 -07002292 index = pos >> PAGE_CACHE_SHIFT;
2293 offset = pos & ~PAGE_CACHE_MASK;
2294
2295 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2296 zerofrom = curpos & ~PAGE_CACHE_MASK;
2297 if (zerofrom & (blocksize-1)) {
2298 *bytes |= (blocksize-1);
2299 (*bytes)++;
2300 }
2301 len = PAGE_CACHE_SIZE - zerofrom;
2302
2303 err = pagecache_write_begin(file, mapping, curpos, len,
2304 AOP_FLAG_UNINTERRUPTIBLE,
2305 &page, &fsdata);
2306 if (err)
2307 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002308 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002309 err = pagecache_write_end(file, mapping, curpos, len, len,
2310 page, fsdata);
2311 if (err < 0)
2312 goto out;
2313 BUG_ON(err != len);
2314 err = 0;
OGAWA Hirofumi061e9742008-04-28 02:16:28 -07002315
2316 balance_dirty_pages_ratelimited(mapping);
Nick Piggin89e10782007-10-16 01:25:07 -07002317 }
2318
2319 /* page covers the boundary, find the boundary offset */
2320 if (index == curidx) {
2321 zerofrom = curpos & ~PAGE_CACHE_MASK;
2322 /* if we will expand the thing last block will be filled */
2323 if (offset <= zerofrom) {
2324 goto out;
2325 }
2326 if (zerofrom & (blocksize-1)) {
2327 *bytes |= (blocksize-1);
2328 (*bytes)++;
2329 }
2330 len = offset - zerofrom;
2331
2332 err = pagecache_write_begin(file, mapping, curpos, len,
2333 AOP_FLAG_UNINTERRUPTIBLE,
2334 &page, &fsdata);
2335 if (err)
2336 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002337 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002338 err = pagecache_write_end(file, mapping, curpos, len, len,
2339 page, fsdata);
2340 if (err < 0)
2341 goto out;
2342 BUG_ON(err != len);
2343 err = 0;
2344 }
2345out:
2346 return err;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002347}
2348
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349/*
2350 * For moronic filesystems that do not allow holes in files.
2351 * We may have to extend the file.
2352 */
Nick Piggin89e10782007-10-16 01:25:07 -07002353int cont_write_begin(struct file *file, struct address_space *mapping,
2354 loff_t pos, unsigned len, unsigned flags,
2355 struct page **pagep, void **fsdata,
2356 get_block_t *get_block, loff_t *bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 unsigned blocksize = 1 << inode->i_blkbits;
Nick Piggin89e10782007-10-16 01:25:07 -07002360 unsigned zerofrom;
2361 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362
Nick Piggin89e10782007-10-16 01:25:07 -07002363 err = cont_expand_zero(file, mapping, pos, bytes);
2364 if (err)
2365 goto out;
2366
2367 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2368 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2369 *bytes |= (blocksize-1);
2370 (*bytes)++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 }
2372
Nick Piggin89e10782007-10-16 01:25:07 -07002373 *pagep = NULL;
2374 err = block_write_begin(file, mapping, pos, len,
2375 flags, pagep, fsdata, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376out:
Nick Piggin89e10782007-10-16 01:25:07 -07002377 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378}
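
/*
 * Illustrative sketch (not part of the original file): a hole-less
 * filesystem passes cont_write_begin() a pointer to its "allocated so
 * far" byte counter so the gap up to @pos gets zero-filled first.
 * MYFS_I and its mmu_private field are hypothetical, modelled on
 * FAT-style per-inode state.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *				pagep, fsdata, myfs_get_block,
 *				&MYFS_I(mapping->host)->mmu_private);
 *	}
 */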
2379
2380int block_prepare_write(struct page *page, unsigned from, unsigned to,
2381 get_block_t *get_block)
2382{
2383 struct inode *inode = page->mapping->host;
2384 int err = __block_prepare_write(inode, page, from, to, get_block);
2385 if (err)
2386 ClearPageUptodate(page);
2387 return err;
2388}
2389
2390int block_commit_write(struct page *page, unsigned from, unsigned to)
2391{
2392 struct inode *inode = page->mapping->host;
2393 __block_commit_write(inode,page,from,to);
2394 return 0;
2395}
2396
David Chinner54171692007-07-19 17:39:55 +10002397/*
2398 * block_page_mkwrite() is not allowed to change the file size as it gets
2399 * called from a page fault handler when a page is first dirtied. Hence we must
2400 * be careful to check for EOF conditions here. We set the page up correctly
2401 * for a written page which means we get ENOSPC checking when writing into
2402 * holes and correct delalloc and unwritten extent mapping on filesystems that
2403 * support these features.
2404 *
2405 * We are not allowed to take the i_mutex here so we have to play games to
2406 * protect against truncate races as the page could now be beyond EOF. Because
2407 * vmtruncate() writes the inode size before removing pages, once we have the
2408 * page lock we can determine safely if the page is beyond EOF. If it is not
2409 * beyond EOF, then the page is guaranteed safe against truncation until we
2410 * unlock the page.
2411 */
2412int
2413block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2414 get_block_t get_block)
2415{
2416 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2417 unsigned long end;
2418 loff_t size;
2419 int ret = -EINVAL;
2420
2421 lock_page(page);
2422 size = i_size_read(inode);
2423 if ((page->mapping != inode->i_mapping) ||
Nick Piggin18336332007-07-20 00:31:45 -07002424 (page_offset(page) > size)) {
David Chinner54171692007-07-19 17:39:55 +10002425 /* page got truncated out from underneath us */
2426 goto out_unlock;
2427 }
2428
2429 /* page is wholly or partially inside EOF */
2430 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2431 end = size & ~PAGE_CACHE_MASK;
2432 else
2433 end = PAGE_CACHE_SIZE;
2434
2435 ret = block_prepare_write(page, 0, end, get_block);
2436 if (!ret)
2437 ret = block_commit_write(page, 0, end);
2438
2439out_unlock:
2440 unlock_page(page);
2441 return ret;
2442}
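
/*
 * Illustrative sketch (not part of the original file): a filesystem hooks
 * this into its mmap path via vm_operations_struct; the myfs_* names are
 * hypothetical.
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *					struct page *page)
 *	{
 *		return block_page_mkwrite(vma, page, myfs_get_block);
 *	}
 *
 *	static struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */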
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443
2444/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002445 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 * immediately, while under the page lock. So it needs a special end_io
2447 * handler which does not touch the bh after unlocking it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 */
2449static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2450{
Dmitry Monakhov68671f32007-10-16 01:24:47 -07002451 __end_buffer_read_notouch(bh, uptodate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452}
2453
2454/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002455 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2456 * the page (converting it to circular linked list and taking care of page
2457 * dirty races).
2458 */
2459static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2460{
2461 struct buffer_head *bh;
2462
2463 BUG_ON(!PageLocked(page));
2464
2465 spin_lock(&page->mapping->private_lock);
2466 bh = head;
2467 do {
2468 if (PageDirty(page))
2469 set_buffer_dirty(bh);
2470 if (!bh->b_this_page)
2471 bh->b_this_page = head;
2472 bh = bh->b_this_page;
2473 } while (bh != head);
2474 attach_page_buffers(page, head);
2475 spin_unlock(&page->mapping->private_lock);
2476}
2477
2478/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 * On entry, the page is fully not uptodate.
2480 * On exit the page is fully uptodate in the areas outside (from,to)
2481 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002482int nobh_write_begin(struct file *file, struct address_space *mapping,
2483 loff_t pos, unsigned len, unsigned flags,
2484 struct page **pagep, void **fsdata,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485 get_block_t *get_block)
2486{
Nick Piggin03158cd2007-10-16 01:25:25 -07002487 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 const unsigned blkbits = inode->i_blkbits;
2489 const unsigned blocksize = 1 << blkbits;
Nick Piggina4b06722007-10-16 01:24:48 -07002490 struct buffer_head *head, *bh;
Nick Piggin03158cd2007-10-16 01:25:25 -07002491 struct page *page;
2492 pgoff_t index;
2493 unsigned from, to;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 unsigned block_in_page;
Nick Piggina4b06722007-10-16 01:24:48 -07002495 unsigned block_start, block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 sector_t block_in_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 int nr_reads = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 int ret = 0;
2499 int is_mapped_to_disk = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500
Nick Piggin03158cd2007-10-16 01:25:25 -07002501 index = pos >> PAGE_CACHE_SHIFT;
2502 from = pos & (PAGE_CACHE_SIZE - 1);
2503 to = from + len;
2504
Nick Piggin54566b22009-01-04 12:00:53 -08002505 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Piggin03158cd2007-10-16 01:25:25 -07002506 if (!page)
2507 return -ENOMEM;
2508 *pagep = page;
2509 *fsdata = NULL;
2510
2511 if (page_has_buffers(page)) {
2512 unlock_page(page);
2513 page_cache_release(page);
2514 *pagep = NULL;
2515 return block_write_begin(file, mapping, pos, len, flags, pagep,
2516 fsdata, get_block);
2517 }
Nick Piggina4b06722007-10-16 01:24:48 -07002518
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 if (PageMappedToDisk(page))
2520 return 0;
2521
Nick Piggina4b06722007-10-16 01:24:48 -07002522 /*
2523 * Allocate buffers so that we can keep track of state, and potentially
2524 * attach them to the page if an error occurs. In the common case of
2525 * no error, they will just be freed again without ever being attached
2526 * to the page (which is all OK, because we're under the page lock).
2527 *
2528 * Be careful: the buffer linked list is a NULL terminated one, rather
2529 * than the circular one we're used to.
2530 */
2531 head = alloc_page_buffers(page, blocksize, 0);
Nick Piggin03158cd2007-10-16 01:25:25 -07002532 if (!head) {
2533 ret = -ENOMEM;
2534 goto out_release;
2535 }
Nick Piggina4b06722007-10-16 01:24:48 -07002536
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538
2539 /*
2540 * We loop across all blocks in the page, whether or not they are
2541 * part of the affected region. This is so we can discover if the
2542 * page is fully mapped-to-disk.
2543 */
Nick Piggina4b06722007-10-16 01:24:48 -07002544 for (block_start = 0, block_in_page = 0, bh = head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 block_start < PAGE_CACHE_SIZE;
Nick Piggina4b06722007-10-16 01:24:48 -07002546 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 int create;
2548
Nick Piggina4b06722007-10-16 01:24:48 -07002549 block_end = block_start + blocksize;
2550 bh->b_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551 create = 1;
2552 if (block_start >= to)
2553 create = 0;
2554 ret = get_block(inode, block_in_file + block_in_page,
Nick Piggina4b06722007-10-16 01:24:48 -07002555 bh, create);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 if (ret)
2557 goto failed;
Nick Piggina4b06722007-10-16 01:24:48 -07002558 if (!buffer_mapped(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 is_mapped_to_disk = 0;
Nick Piggina4b06722007-10-16 01:24:48 -07002560 if (buffer_new(bh))
2561 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2562 if (PageUptodate(page)) {
2563 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 continue;
Nick Piggina4b06722007-10-16 01:24:48 -07002565 }
2566 if (buffer_new(bh) || !buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002567 zero_user_segments(page, block_start, from,
2568 to, block_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 continue;
2570 }
Nick Piggina4b06722007-10-16 01:24:48 -07002571 if (buffer_uptodate(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572 continue; /* reiserfs does this */
2573 if (block_start < from || block_end > to) {
Nick Piggina4b06722007-10-16 01:24:48 -07002574 lock_buffer(bh);
2575 bh->b_end_io = end_buffer_read_nobh;
2576 submit_bh(READ, bh);
2577 nr_reads++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578 }
2579 }
2580
2581 if (nr_reads) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 /*
2583 * The page is locked, so these buffers are protected from
2584 * any VM or truncate activity. Hence we don't need to care
2585 * for the buffer_head refcounts.
2586 */
Nick Piggina4b06722007-10-16 01:24:48 -07002587 for (bh = head; bh; bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 wait_on_buffer(bh);
2589 if (!buffer_uptodate(bh))
2590 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 }
2592 if (ret)
2593 goto failed;
2594 }
2595
2596 if (is_mapped_to_disk)
2597 SetPageMappedToDisk(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598
Nick Piggin03158cd2007-10-16 01:25:25 -07002599 *fsdata = head; /* to be released by nobh_write_end */
Nick Piggina4b06722007-10-16 01:24:48 -07002600
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 return 0;
2602
2603failed:
Nick Piggin03158cd2007-10-16 01:25:25 -07002604 BUG_ON(!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 /*
Nick Piggina4b06722007-10-16 01:24:48 -07002606 * Error recovery is a bit difficult. We need to zero out blocks that
2607 * were newly allocated, and dirty them to ensure they get written out.
2608 * Buffers need to be attached to the page at this point, otherwise
2609 * the handling of potential IO errors during writeout would be hard
2610 * (could try doing synchronous writeout, but what if that fails too?)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002612 attach_nobh_buffers(page, head);
2613 page_zero_new_buffers(page, from, to);
Nick Piggina4b06722007-10-16 01:24:48 -07002614
Nick Piggin03158cd2007-10-16 01:25:25 -07002615out_release:
2616 unlock_page(page);
2617 page_cache_release(page);
2618 *pagep = NULL;
Nick Piggina4b06722007-10-16 01:24:48 -07002619
Nick Piggin03158cd2007-10-16 01:25:25 -07002620 if (pos + len > inode->i_size)
2621 vmtruncate(inode, inode->i_size);
Nick Piggina4b06722007-10-16 01:24:48 -07002622
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 return ret;
2624}
Nick Piggin03158cd2007-10-16 01:25:25 -07002625EXPORT_SYMBOL(nobh_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626
Nick Piggin03158cd2007-10-16 01:25:25 -07002627int nobh_write_end(struct file *file, struct address_space *mapping,
2628 loff_t pos, unsigned len, unsigned copied,
2629 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630{
2631 struct inode *inode = page->mapping->host;
Nick Pigginefdc3132007-10-21 06:57:41 +02002632 struct buffer_head *head = fsdata;
Nick Piggin03158cd2007-10-16 01:25:25 -07002633 struct buffer_head *bh;
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002634 BUG_ON(fsdata != NULL && page_has_buffers(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002636 if (unlikely(copied < len) && !page_has_buffers(page))
2637 attach_nobh_buffers(page, head);
2638 if (page_has_buffers(page))
2639 return generic_write_end(file, mapping, pos, len,
2640 copied, page, fsdata);
Nick Piggina4b06722007-10-16 01:24:48 -07002641
Nick Piggin22c8ca72007-02-20 13:58:09 -08002642 SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 set_page_dirty(page);
Nick Piggin03158cd2007-10-16 01:25:25 -07002644 if (pos+copied > inode->i_size) {
2645 i_size_write(inode, pos+copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 mark_inode_dirty(inode);
2647 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002648
2649 unlock_page(page);
2650 page_cache_release(page);
2651
Nick Piggin03158cd2007-10-16 01:25:25 -07002652 while (head) {
2653 bh = head;
2654 head = head->b_this_page;
2655 free_buffer_head(bh);
2656 }
2657
2658 return copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659}
Nick Piggin03158cd2007-10-16 01:25:25 -07002660EXPORT_SYMBOL(nobh_write_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661
2662/*
2663 * nobh_writepage() - based on block_write_full_page() except
2664 * that it tries to operate without attaching bufferheads to
2665 * the page.
2666 */
2667int nobh_writepage(struct page *page, get_block_t *get_block,
2668 struct writeback_control *wbc)
2669{
2670 struct inode * const inode = page->mapping->host;
2671 loff_t i_size = i_size_read(inode);
2672 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2673 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 int ret;
2675
2676 /* Is the page fully inside i_size? */
2677 if (page->index < end_index)
2678 goto out;
2679
2680 /* Is the page fully outside i_size? (truncate in progress) */
2681 offset = i_size & (PAGE_CACHE_SIZE-1);
2682 if (page->index >= end_index+1 || !offset) {
2683 /*
2684 * The page may have dirty, unmapped buffers. For example,
2685 * they may have been added in ext3_writepage(). Make them
2686 * freeable here, so the page does not leak.
2687 */
2688#if 0
2689 /* Not really sure about this - do we need this ? */
2690 if (page->mapping->a_ops->invalidatepage)
2691 page->mapping->a_ops->invalidatepage(page, offset);
2692#endif
2693 unlock_page(page);
2694 return 0; /* don't care */
2695 }
2696
2697 /*
2698 * The page straddles i_size. It must be zeroed out on each and every
2699 * writepage invocation because it may be mmapped. "A file is mapped
2700 * in multiples of the page size. For a file that is not a multiple of
2701 * the page size, the remaining memory is zeroed when mapped, and
2702 * writes to that region are not written out to the file."
2703 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002704 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705out:
2706 ret = mpage_writepage(page, get_block, wbc);
2707 if (ret == -EAGAIN)
2708 ret = __block_write_full_page(inode, page, get_block, wbc);
2709 return ret;
2710}
2711EXPORT_SYMBOL(nobh_writepage);
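
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * offering a "nobh" mode pairs these helpers in a separate set of
 * address_space operations, with thin wrappers passing its own
 * get_block to nobh_writepage() and nobh_write_begin(); all myfs_*
 * names are hypothetical.
 *
 *	static const struct address_space_operations myfs_nobh_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_nobh_writepage,
 *		.write_begin	= myfs_nobh_write_begin,
 *		.write_end	= nobh_write_end,
 *	};
 */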
2712
Nick Piggin03158cd2007-10-16 01:25:25 -07002713int nobh_truncate_page(struct address_space *mapping,
2714 loff_t from, get_block_t *get_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2717 unsigned offset = from & (PAGE_CACHE_SIZE-1);
Nick Piggin03158cd2007-10-16 01:25:25 -07002718 unsigned blocksize;
2719 sector_t iblock;
2720 unsigned length, pos;
2721 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 struct page *page;
Nick Piggin03158cd2007-10-16 01:25:25 -07002723 struct buffer_head map_bh;
2724 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725
Nick Piggin03158cd2007-10-16 01:25:25 -07002726 blocksize = 1 << inode->i_blkbits;
2727 length = offset & (blocksize - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728
Nick Piggin03158cd2007-10-16 01:25:25 -07002729 /* Block boundary? Nothing to do */
2730 if (!length)
2731 return 0;
2732
2733 length = blocksize - length;
2734 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2735
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 page = grab_cache_page(mapping, index);
Nick Piggin03158cd2007-10-16 01:25:25 -07002737 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 if (!page)
2739 goto out;
2740
Nick Piggin03158cd2007-10-16 01:25:25 -07002741 if (page_has_buffers(page)) {
2742has_buffers:
2743 unlock_page(page);
2744 page_cache_release(page);
2745 return block_truncate_page(mapping, from, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002747
2748 /* Find the buffer that contains "offset" */
2749 pos = blocksize;
2750 while (offset >= pos) {
2751 iblock++;
2752 pos += blocksize;
2753 }
2754
2755 err = get_block(inode, iblock, &map_bh, 0);
2756 if (err)
2757 goto unlock;
2758 /* unmapped? It's a hole - nothing to do */
2759 if (!buffer_mapped(&map_bh))
2760 goto unlock;
2761
2762 /* Ok, it's mapped. Make sure it's up-to-date */
2763 if (!PageUptodate(page)) {
2764 err = mapping->a_ops->readpage(NULL, page);
2765 if (err) {
2766 page_cache_release(page);
2767 goto out;
2768 }
2769 lock_page(page);
2770 if (!PageUptodate(page)) {
2771 err = -EIO;
2772 goto unlock;
2773 }
2774 if (page_has_buffers(page))
2775 goto has_buffers;
2776 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002777 zero_user(page, offset, length);
Nick Piggin03158cd2007-10-16 01:25:25 -07002778 set_page_dirty(page);
2779 err = 0;
2780
2781unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 unlock_page(page);
2783 page_cache_release(page);
2784out:
Nick Piggin03158cd2007-10-16 01:25:25 -07002785 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786}
2787EXPORT_SYMBOL(nobh_truncate_page);
2788
2789int block_truncate_page(struct address_space *mapping,
2790 loff_t from, get_block_t *get_block)
2791{
2792 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2793 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2794 unsigned blocksize;
Andrew Morton54b21a72006-01-08 01:03:05 -08002795 sector_t iblock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 unsigned length, pos;
2797 struct inode *inode = mapping->host;
2798 struct page *page;
2799 struct buffer_head *bh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 int err;
2801
2802 blocksize = 1 << inode->i_blkbits;
2803 length = offset & (blocksize - 1);
2804
2805 /* Block boundary? Nothing to do */
2806 if (!length)
2807 return 0;
2808
2809 length = blocksize - length;
Andrew Morton54b21a72006-01-08 01:03:05 -08002810 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811
2812 page = grab_cache_page(mapping, index);
2813 err = -ENOMEM;
2814 if (!page)
2815 goto out;
2816
2817 if (!page_has_buffers(page))
2818 create_empty_buffers(page, blocksize, 0);
2819
2820 /* Find the buffer that contains "offset" */
2821 bh = page_buffers(page);
2822 pos = blocksize;
2823 while (offset >= pos) {
2824 bh = bh->b_this_page;
2825 iblock++;
2826 pos += blocksize;
2827 }
2828
2829 err = 0;
2830 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002831 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 err = get_block(inode, iblock, bh, 0);
2833 if (err)
2834 goto unlock;
2835 /* unmapped? It's a hole - nothing to do */
2836 if (!buffer_mapped(bh))
2837 goto unlock;
2838 }
2839
2840 /* Ok, it's mapped. Make sure it's up-to-date */
2841 if (PageUptodate(page))
2842 set_buffer_uptodate(bh);
2843
David Chinner33a266d2007-02-12 00:51:41 -08002844 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 err = -EIO;
2846 ll_rw_block(READ, 1, &bh);
2847 wait_on_buffer(bh);
2848 /* Uhhuh. Read error. Complain and punt. */
2849 if (!buffer_uptodate(bh))
2850 goto unlock;
2851 }
2852
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002853 zero_user(page, offset, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 mark_buffer_dirty(bh);
2855 err = 0;
2856
2857unlock:
2858 unlock_page(page);
2859 page_cache_release(page);
2860out:
2861 return err;
2862}
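
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * typically calls this while shrinking a file, to zero out the tail of
 * the last remaining block; myfs_get_block is a hypothetical name.
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  myfs_get_block);
 */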
2863
2864/*
2865 * The generic ->writepage function for buffer-backed address_spaces
2866 */
2867int block_write_full_page(struct page *page, get_block_t *get_block,
2868 struct writeback_control *wbc)
2869{
2870 struct inode * const inode = page->mapping->host;
2871 loff_t i_size = i_size_read(inode);
2872 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2873 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874
2875 /* Is the page fully inside i_size? */
2876 if (page->index < end_index)
2877 return __block_write_full_page(inode, page, get_block, wbc);
2878
2879 /* Is the page fully outside i_size? (truncate in progress) */
2880 offset = i_size & (PAGE_CACHE_SIZE-1);
2881 if (page->index >= end_index+1 || !offset) {
2882 /*
2883 * The page may have dirty, unmapped buffers. For example,
2884 * they may have been added in ext3_writepage(). Make them
2885 * freeable here, so the page does not leak.
2886 */
Jan Karaaaa40592005-10-30 15:00:16 -08002887 do_invalidatepage(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 unlock_page(page);
2889 return 0; /* don't care */
2890 }
2891
2892 /*
2893 * The page straddles i_size. It must be zeroed out on each and every
2894 * writepage invocation because it may be mmapped. "A file is mapped
2895 * in multiples of the page size. For a file that is not a multiple of
2896 * the page size, the remaining memory is zeroed when mapped, and
2897 * writes to that region are not written out to the file."
2898 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002899 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 return __block_write_full_page(inode, page, get_block, wbc);
2901}
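
/*
 * Illustrative sketch (not part of the original file): the usual
 * ->writepage wrapper around this function; myfs_get_block is a
 * hypothetical name.
 *
 *	static int myfs_writepage(struct page *page,
 *					struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */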
2902
2903sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2904 get_block_t *get_block)
2905{
2906 struct buffer_head tmp;
2907 struct inode *inode = mapping->host;
2908 tmp.b_state = 0;
2909 tmp.b_blocknr = 0;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002910 tmp.b_size = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 get_block(inode, block, &tmp, 0);
2912 return tmp.b_blocknr;
2913}
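
/*
 * Illustrative sketch (not part of the original file): the ->bmap
 * address_space operation, used by the FIBMAP ioctl and the swap code,
 * is normally just this wrapper; myfs_* names are hypothetical.
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */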
2914
NeilBrown6712ecf2007-09-27 12:47:43 +02002915static void end_bio_bh_io_sync(struct bio *bio, int err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916{
2917 struct buffer_head *bh = bio->bi_private;
2918
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 if (err == -EOPNOTSUPP) {
2920 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2921 set_bit(BH_Eopnotsupp, &bh->b_state);
2922 }
2923
Keith Mannthey08bafc02008-11-25 10:24:35 +01002924 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2925 set_bit(BH_Quiet, &bh->b_state);
2926
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2928 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929}
2930
2931int submit_bh(int rw, struct buffer_head * bh)
2932{
2933 struct bio *bio;
2934 int ret = 0;
2935
2936 BUG_ON(!buffer_locked(bh));
2937 BUG_ON(!buffer_mapped(bh));
2938 BUG_ON(!bh->b_end_io);
2939
Jens Axboe48fd4f92008-08-22 10:00:36 +02002940 /*
2941 * Mask in barrier bit for a write (could be either a WRITE or a
2942 * WRITE_SYNC).
2943 */
2944 if (buffer_ordered(bh) && (rw & WRITE))
2945 rw |= WRITE_BARRIER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946
2947 /*
Jens Axboe48fd4f92008-08-22 10:00:36 +02002948 * Only clear out a write error when rewriting
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 */
Jens Axboe48fd4f92008-08-22 10:00:36 +02002950 if (test_set_buffer_req(bh) && (rw & WRITE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 clear_buffer_write_io_error(bh);
2952
2953 /*
2954 * from here on down, it's all bio -- do the initial mapping,
2955 * submit_bio -> generic_make_request may further map this bio around
2956 */
2957 bio = bio_alloc(GFP_NOIO, 1);
2958
2959 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2960 bio->bi_bdev = bh->b_bdev;
2961 bio->bi_io_vec[0].bv_page = bh->b_page;
2962 bio->bi_io_vec[0].bv_len = bh->b_size;
2963 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2964
2965 bio->bi_vcnt = 1;
2966 bio->bi_idx = 0;
2967 bio->bi_size = bh->b_size;
2968
2969 bio->bi_end_io = end_bio_bh_io_sync;
2970 bio->bi_private = bh;
2971
2972 bio_get(bio);
2973 submit_bio(rw, bio);
2974
2975 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2976 ret = -EOPNOTSUPP;
2977
2978 bio_put(bio);
2979 return ret;
2980}
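
/*
 * Illustrative sketch (not part of the original file): a synchronous read
 * of one buffer via submit_bh(), mirroring the pattern used by
 * ll_rw_block() below. The caller is assumed to already hold a reference
 * on bh.
 *
 *	lock_buffer(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	get_bh(bh);
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */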
2981
2982/**
2983 * ll_rw_block: low-level access to block devices (DEPRECATED)
Jan Karaa7662232005-09-06 15:19:10 -07002984 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 * @nr: number of &struct buffer_heads in the array
2986 * @bhs: array of pointers to &struct buffer_head
2987 *
Jan Karaa7662232005-09-06 15:19:10 -07002988 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2989 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2990 * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2991 * in the buffers is sent to disk. The fourth option, %READA, is described in the documentation
2992 * for generic_make_request() which ll_rw_block() calls.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 *
2994 * This function drops any buffer that it cannot get a lock on (with the
Jan Karaa7662232005-09-06 15:19:10 -07002995 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2996 * clean when doing a write request, and any buffer that appears to be
2997 * up-to-date when doing a read request. Further, it marks as clean buffers that
2998 * are processed for writing (the buffer cache won't assume that they are
2999 * actually clean until the buffer gets unlocked).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000 *
3001 * ll_rw_block sets b_end_io to simple completion handler that marks
3002 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3003 * any waiters.
3004 *
3005 * All of the buffers must be for the same device, and must also be a
3006 * multiple of the current approved size for the device.
3007 */
3008void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3009{
3010 int i;
3011
3012 for (i = 0; i < nr; i++) {
3013 struct buffer_head *bh = bhs[i];
3014
Jens Axboe18ce3752008-07-01 09:07:34 +02003015 if (rw == SWRITE || rw == SWRITE_SYNC)
Jan Karaa7662232005-09-06 15:19:10 -07003016 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02003017 else if (!trylock_buffer(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018 continue;
3019
Jens Axboe18ce3752008-07-01 09:07:34 +02003020 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021 if (test_clear_buffer_dirty(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07003022 bh->b_end_io = end_buffer_write_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08003023 get_bh(bh);
Jens Axboe18ce3752008-07-01 09:07:34 +02003024 if (rw == SWRITE_SYNC)
3025 submit_bh(WRITE_SYNC, bh);
3026 else
3027 submit_bh(WRITE, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 continue;
3029 }
3030 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 if (!buffer_uptodate(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07003032 bh->b_end_io = end_buffer_read_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08003033 get_bh(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 submit_bh(rw, bh);
3035 continue;
3036 }
3037 }
3038 unlock_buffer(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 }
3040}
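
/*
 * Illustrative sketch (not part of the original file): the classic
 * "read and wait" use of this (deprecated) interface, as seen in
 * block_truncate_page() above:
 *
 *	ll_rw_block(READ, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */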
3041
3042/*
3043 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3044 * and then start new I/O and then wait upon it. The caller must have a ref on
3045 * the buffer_head.
3046 */
int sync_dirty_buffer(struct buffer_head *bh)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE_SYNC, bh);
		wait_on_buffer(bh);
		if (buffer_eopnotsupp(bh)) {
			clear_buffer_eopnotsupp(bh);
			ret = -EOPNOTSUPP;
		}
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
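
/*
 * Illustrative sketch (not from the original source): a filesystem might
 * flush one dirty metadata block with sync_dirty_buffer() roughly like
 * this.  The helper name and the "update b_data" step are hypothetical.
 */
static int example_sync_metadata_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_getblk(sb, blocknr);
	int err;

	if (!bh)
		return -EIO;
	lock_buffer(bh);
	/* ... update bh->b_data here ... */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* submits WRITE_SYNC and waits */
	brelse(bh);
	return err;
}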

/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty, which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
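/*
 * A buffer is "busy" if it still has references (b_count != 0) or is
 * dirty or locked; the bitwise OR below folds all three checks into a
 * single branch-free test.
 */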
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
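
/*
 * Illustrative sketch (not from the original source): a filesystem with no
 * private page state can implement ->releasepage by delegating straight to
 * try_to_free_buffers().  The gfp mask is unused in this minimal version.
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}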

void block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
}

/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
asmlinkage long sys_bdflush(int func, long data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

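/*
 * Refresh buffer_heads_over_limit.  The per-cpu counters are summed only
 * once every 4096 alloc/free events on this CPU: an exact global count is
 * not needed, just a rough over/under-the-limit hint.
 */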
static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);

static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
	per_cpu(bh_accounting, cpu).nr = 0;
	put_cpu_var(bh_accounting);
}

static int buffer_cpu_notify(struct notifier_block *self,
			     unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}

/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return 1 if the buffer is up-to-date; in that case the buffer is left
 * unlocked.  Otherwise return 0 with the buffer locked, so that the
 * caller can read it in and mark it up-to-date.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);

/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * The buffer must be locked on entry (see the BUG_ON below).
 * Returns zero on success and -EIO on error.
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);
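
/*
 * Illustrative sketch (not from the original source): the intended pairing
 * of the two helpers above when reading a single metadata buffer.  The
 * wrapper name is hypothetical.
 */
static int example_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already up-to-date, not locked */
	return bh_submit_read(bh);	/* we hold the lock; the read unlocks */
}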

static void
init_buffer_head(void *data)
{
	struct buffer_head *bh = data;

	memset(bh, 0, sizeof(*bh));
	INIT_LIST_HEAD(&bh->b_assoc_buffers);
}

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				init_buffer_head);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
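
/*
 * Back-of-the-envelope check (illustrative; exact sizes vary by config):
 * with 4KB pages and a struct buffer_head of roughly 100 bytes, each page
 * holds about 40 buffer heads, so the limit above works out to ~40 *
 * nrpages buffer heads before buffer_heads_over_limit trips.
 */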

EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_page_mkwrite);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_write_begin);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(mark_buffer_dirty);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(unlock_buffer);