/*
 * linux/fs/buffer.c
 *
 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

static int sync_buffer(void *word)
{
        struct block_device *bd;
        struct buffer_head *bh
                = container_of(word, struct buffer_head, b_state);

        smp_mb();
        bd = bh->b_bdev;
        if (bd)
                blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
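
/*
 * Usage sketch (illustrative, not a verbatim kernel caller): the
 * primitives above compose as follows.  wait_on_buffer() returns once
 * the I/O completion handler calls unlock_buffer(), and lock_buffer()
 * sleeps in __lock_buffer() via sync_buffer() when contended.
 *
 *	ll_rw_block(READ, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 *	lock_buffer(bh);
 *	...modify bh->b_data...
 *	unlock_buffer(bh);
 *
 * (bh is assumed to come from __getblk() or similar.)
 */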

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
        if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
                return 0;
        return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}
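
/*
 * Worked example (illustrative): with 4K pages (PAGE_CACHE_SHIFT == 12)
 * and a 1K block size (i_blkbits == 10), the shift above is 12 - 10 = 2,
 * so four blocks share each page and block 4093 is looked up in
 * pagecache page 4093 >> 2 == 1023.  The buffer-ring walk then matches
 * b_blocknr within that page.
 */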

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media-disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to an error by the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted, thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk). Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty
   buffers.

   These are two special cases. Normal usage implies the device driver
   issuing a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        invalidate_mapping_pages(mapping, 0, -1);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Kick the flusher threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone *zone;
        int nid;

        wakeup_flusher_threads(1024);
        yield();

        for_each_online_node(nid) {
                (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                                gfp_zone(GFP_NOFS), NULL,
                                                &zone);
                if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                                GFP_NOFS, NULL);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (!quiet_error(bh))
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
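
/*
 * Usage sketch (illustrative, simplified from what
 * __block_write_full_page() does): a writepage-style path flags each
 * dirty, mapped buffer for async write, then submits them; the last
 * completing buffer ends PageWriteback via end_buffer_async_write().
 *
 *	bh = head;
 *	do {
 *		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 *			lock_buffer(bh);
 *			clear_buffer_dirty(bh);
 *			mark_buffer_async_write(bh);
 *		}
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 *	set_page_writeback(page);
 *	unlock_page(page);
 *	do {
 *		struct buffer_head *next = bh->b_this_page;
 *		if (buffer_async_write(bh))
 *			submit_bh(WRITE, bh);
 *		bh = next;
 *	} while (bh != head);
 */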


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}
static void do_thaw_all(struct work_struct *work)
{
        struct super_block *sb;
        char b[BDEVNAME_SIZE];

        spin_lock(&sb_lock);
restart:
        list_for_each_entry(sb, &super_blocks, s_list) {
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_read(&sb->s_umount);
                while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
                        printk(KERN_WARNING "Emergency Thaw on %s\n",
                               bdevname(sb->s_bdev, b));
                up_read(&sb->s_umount);
                spin_lock(&sb_lock);
                if (__put_super_and_need_restart(sb))
                        goto restart;
        }
        spin_unlock(&sb_lock);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
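
/*
 * Usage sketch (illustrative; essentially what simple_fsync() does): a
 * filesystem fsync flushes the associated metadata buffers first and
 * then the inode itself.  my_fsync is a hypothetical name.
 *
 *	int my_fsync(struct file *file, struct dentry *dentry, int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err, ret;
 *
 *		ret = sync_mapping_buffers(inode->i_mapping);
 *		if (!(inode->i_state & I_DIRTY))
 *			return ret;
 *		if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 *			return ret;
 *		err = write_inode_now(inode, 1);
 *		if (ret == 0)
 *			ret = err;
 *		return ret;
 *	}
 */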

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
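
/*
 * Usage sketch (illustrative): when a filesystem dirties metadata that
 * fsync() of a *file* must flush - e.g. an ext2-style indirect block
 * read via sb_bread() - it tags the buffer against the owning inode so
 * sync_mapping_buffers() will find it later.  indirect_blk, slot and
 * new_block are hypothetical names.
 *
 *	struct buffer_head *bh = sb_bread(inode->i_sb, indirect_blk);
 *	if (bh) {
 *		((__le32 *)bh->b_data)[slot] = cpu_to_le32(new_block);
 *		mark_buffer_dirty_inode(bh, inode);
 *		brelse(bh);
 *	}
 */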

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
{
        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        int newly_dirty;
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
                __set_page_dirty(page, mapping, 1);
        return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
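
/*
 * Note (illustrative): set_page_dirty() falls back to
 * __set_page_dirty_buffers() when a mapping's a_ops->set_page_dirty is
 * NULL, so this is the default set_page_dirty implementation for
 * buffer-backed mappings.  An address_space_operations table may also
 * name it explicitly:
 *
 *	.set_page_dirty	= __set_page_dirty_buffers,
 */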

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping, *prev_mapping = NULL;
        int err = 0, err2;

        INIT_LIST_HEAD(&tmp);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * ll_rw_block() actually writes the current
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
                                ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                if (prev_mapping && prev_mapping != mapping)
                                        blk_run_address_space(prev_mapping);
                                prev_mapping = mapping;

                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_private = NULL;
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                init_buffer(bh, NULL, NULL);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;

        page = find_or_create_page(inode->i_mapping, index,
                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
                return NULL;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        init_page_buffers(page, bdev, block, size);
                        return page;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        init_page_buffers(page, bdev, block, size);
        spin_unlock(&inode->i_mapping->private_lock);
        return page;

failed:
        BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
        struct page *page;
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index.  (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
                        __func__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }
        block = index << sizebits;
        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
                return 0;
        unlock_page(page);
        page_cache_release(page);
        return 1;
}
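
/*
 * Worked example (illustrative): with 4K pages and size == 1024, the
 * loop above yields sizebits == 2 (1024 << 2 == 4096), so each page
 * holds four blocks.  A request for block 4093 computes
 * index = 4093 >> 2 = 1023 and then rounds block down to
 * 1023 << 2 = 4092, the first of the four blocks backed by that page.
 */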

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "logical block size: %d\n",
                                        bdev_logical_block_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head *bh;
                int ret;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                ret = grow_buffers(bdev, block, size);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh)) {
                struct page *page = bh->b_page;
                if (!TestSetPageDirty(page)) {
                        struct address_space *mapping = page_mapping(page);
                        if (mapping)
                                __set_page_dirty(page, mapping, 0);
                }
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);
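
/*
 * Usage sketch (illustrative): the usual read-modify-write cycle for a
 * metadata block, built from the primitives in this file.
 * sync_dirty_buffer() (defined elsewhere in fs/buffer.c) submits the
 * write and waits for it.  bdev, blocknr, blocksize, off, src, len and
 * err are placeholders.
 *
 *	struct buffer_head *bh = __bread(bdev, blocknr, blocksize);
 *	if (!bh)
 *		return -EIO;
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + off, src, len);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */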

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;

                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     8

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = NULL;
        struct bh_lru *lru;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        if (lru->bhs[0] != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;

                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
                        struct buffer_head *bh2 = lru->bhs[in];

                        if (bh2 == bh) {
                                __brelse(bh2);
                        } else {
                                if (out >= BH_LRU_SIZE) {
                                        BUG_ON(evictee != NULL);
                                        evictee = bh2;
                                } else {
                                        bhs[out++] = bh2;
                                }
                        }
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
                memcpy(lru->bhs, bhs, sizeof(bhs));
        }
        bh_lru_unlock();

        if (evictee)
                __brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        struct bh_lru *lru;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = lru->bhs[i];

                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        lru->bhs[i] = lru->bhs[i - 1];
                                        i--;
                                }
                                lru->bhs[0] = bh;
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        }
        if (bh)
                touch_buffer(bh);
        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size);
        return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
                ll_rw_block(READA, 1, &bh);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread);
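
/*
 * Usage sketch (illustrative): filesystems normally reach __bread()
 * through the sb_bread() wrapper, which fills in the superblock's block
 * device and block size.  sb and blocknr are placeholders.
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	...parse bh->b_data...
 *	brelse(bh);
 *
 * A nearby block can be hinted with sb_breadahead(sb, blocknr + 1)
 * before it is actually needed.
 */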

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
        struct bh_lru *b = &get_cpu_var(bh_lrus);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
        put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
        on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
{
        bh->b_page = page;
        BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
                 */
                bh->b_data = (char *)(0 + offset);
        else
                bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
        unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                goto out;

        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                /*
                 * is this block fully invalidated?
                 */
                if (offset <= curr_off)
                        discard_buffer(bh);
                curr_off = next_off;
                bh = next;
        } while (bh != head);

        /*
         * We release buffers only if the entire page is being invalidated.
         * The get_block cached value has been unconditionally invalidated,
         * so real IO is not possible anymore.
         */
        if (offset == 0)
                try_to_release_page(page, 0);
out:
        return;
}
EXPORT_SYMBOL(block_invalidatepage);
1533
1534/*
1535 * We attach and possibly dirty the buffers atomically wrt
1536 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1537 * is already excluded via the page lock.
1538 */
1539void create_empty_buffers(struct page *page,
1540 unsigned long blocksize, unsigned long b_state)
1541{
1542 struct buffer_head *bh, *head, *tail;
1543
1544 head = alloc_page_buffers(page, blocksize, 1);
1545 bh = head;
1546 do {
1547 bh->b_state |= b_state;
1548 tail = bh;
1549 bh = bh->b_this_page;
1550 } while (bh);
1551 tail->b_this_page = head;
1552
1553 spin_lock(&page->mapping->private_lock);
1554 if (PageUptodate(page) || PageDirty(page)) {
1555 bh = head;
1556 do {
1557 if (PageDirty(page))
1558 set_buffer_dirty(bh);
1559 if (PageUptodate(page))
1560 set_buffer_uptodate(bh);
1561 bh = bh->b_this_page;
1562 } while (bh != head);
1563 }
1564 attach_page_buffers(page, head);
1565 spin_unlock(&page->mapping->private_lock);
1566}
1567EXPORT_SYMBOL(create_empty_buffers);
1568
1569/*
1570 * We are taking a block for data and we don't want any output from any
1571 * buffer-cache aliases starting from the return from this function and
1572 * until the moment when something explicitly marks the buffer
1573 * dirty (hopefully that will not happen until we free that block ;-)
1574 * We don't even need to mark it not-uptodate - nobody can expect
1575 * anything from a newly allocated buffer anyway. We used to use
1576 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1577 * don't want to mark the alias unmapped, for example - it would confuse
1578 * anyone who might pick it up with bread() afterwards...
1579 *
1580 * Also.. Note that bforget() doesn't lock the buffer. So there can
1581 * be writeout I/O going on against recently-freed buffers. We don't
1582 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1583 * only if we really need to. That happens here.
1584 */
1585void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1586{
1587 struct buffer_head *old_bh;
1588
1589 might_sleep();
1590
Coywolf Qi Hunt385fd4c2005-11-07 00:59:39 -08001591 old_bh = __find_get_block_slow(bdev, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 if (old_bh) {
1593 clear_buffer_dirty(old_bh);
1594 wait_on_buffer(old_bh);
1595 clear_buffer_req(old_bh);
1596 __brelse(old_bh);
1597 }
1598}
1599EXPORT_SYMBOL(unmap_underlying_metadata);
1600
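/*
 * Illustrative sketch (not part of the original file): the usual
 * "allocate, then kill stale blockdev aliases" sequence that callers
 * of get_block() perform before writing through a newly mapped buffer;
 * compare the buffer_new() handling in __block_write_full_page() below.
 */
static int example_map_new_block(struct inode *inode, sector_t iblock,
				 struct buffer_head *bh,
				 get_block_t *get_block)
{
	int err = get_block(inode, iblock, bh, 1);

	if (err)
		return err;
	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}
	return 0;
}
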
1601/*
1602 * NOTE! All mapped/uptodate combinations are valid:
1603 *
1604 * Mapped Uptodate Meaning
1605 *
1606 * No No "unknown" - must do get_block()
1607 * No Yes "hole" - zero-filled
1608 * Yes No "allocated" - allocated on disk, not read in
1609 * Yes Yes "valid" - allocated and up-to-date in memory.
1610 *
1611 * "Dirty" is valid only with the last case (mapped+uptodate).
1612 */
1613
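/*
 * Illustrative sketch (not part of the original file): how a reader
 * typically acts on the four mapped/uptodate combinations tabulated
 * above.  Returns 1 when a disk read is needed, 0 when the buffer
 * contents (or the implied zeroes of a hole) are already usable, and
 * -EAGAIN when get_block() must be called first.
 */
static int example_buffer_read_action(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;	/* "hole" or "valid" - data is usable */
	if (!buffer_mapped(bh))
		return -EAGAIN;	/* "unknown" - must do get_block() first */
	return 1;		/* "allocated" - on disk, not read in */
}
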
1614/*
1615 * While block_write_full_page is writing back the dirty buffers under
1616 * the page lock, whoever dirtied the buffers may decide to clean them
1617 * again at any time. We handle that by only looking at the buffer
1618 * state inside lock_buffer().
1619 *
1620 * If block_write_full_page() is called for regular writeback
1621 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1622 * locked buffer. This can only happen if someone has written the buffer
1623 * directly, with submit_bh(). At the address_space level PageWriteback
1624 * prevents this contention from occurring.
Theodore Ts'o6e34eed2009-04-07 18:12:43 -04001625 *
1626 * If block_write_full_page() is called with wbc->sync_mode ==
1627 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1628 * causes the writes to be flagged as synchronous writes, but the
1629 * block device queue will NOT be unplugged, since usually many pages
1630 * will be pushed out before the higher-level caller actually
1631 * waits for the writes to be completed. The various wait functions,
1632 * such as wait_on_writeback_range() will ultimately call sync_page()
1633 * which will ultimately call blk_run_backing_dev(), which will end up
1634 * unplugging the device queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 */
1636static int __block_write_full_page(struct inode *inode, struct page *page,
Chris Mason35c80d52009-04-15 13:22:38 -04001637 get_block_t *get_block, struct writeback_control *wbc,
1638 bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639{
1640 int err;
1641 sector_t block;
1642 sector_t last_block;
Andrew Mortonf0fbd5f2005-05-05 16:15:48 -07001643 struct buffer_head *bh, *head;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001644 const unsigned blocksize = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 int nr_underway = 0;
Theodore Ts'o6e34eed2009-04-07 18:12:43 -04001646 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1647 WRITE_SYNC_PLUG : WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
1649 BUG_ON(!PageLocked(page));
1650
1651 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1652
1653 if (!page_has_buffers(page)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001654 create_empty_buffers(page, blocksize,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 (1 << BH_Dirty)|(1 << BH_Uptodate));
1656 }
1657
1658 /*
1659 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1660 * here, and the (potentially unmapped) buffers may become dirty at
1661 * any time. If a buffer becomes dirty here after we've inspected it
1662 * then we just miss that fact, and the page stays dirty.
1663 *
1664 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1665 * handle that here by just cleaning them.
1666 */
1667
Andrew Morton54b21a72006-01-08 01:03:05 -08001668 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 head = page_buffers(page);
1670 bh = head;
1671
1672 /*
1673 * Get all the dirty buffers mapped to disk addresses and
1674 * handle any aliases from the underlying blockdev's mapping.
1675 */
1676 do {
1677 if (block > last_block) {
1678 /*
1679 * mapped buffers outside i_size will occur, because
1680 * this page can be outside i_size when there is a
1681 * truncate in progress.
1682 */
1683 /*
1684 * The buffer was zeroed by block_write_full_page()
1685 */
1686 clear_buffer_dirty(bh);
1687 set_buffer_uptodate(bh);
Alex Tomas29a814d2008-07-11 19:27:31 -04001688 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1689 buffer_dirty(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001690 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 err = get_block(inode, block, bh, 1);
1692 if (err)
1693 goto recover;
Alex Tomas29a814d2008-07-11 19:27:31 -04001694 clear_buffer_delay(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 if (buffer_new(bh)) {
1696 /* blockdev mappings never come here */
1697 clear_buffer_new(bh);
1698 unmap_underlying_metadata(bh->b_bdev,
1699 bh->b_blocknr);
1700 }
1701 }
1702 bh = bh->b_this_page;
1703 block++;
1704 } while (bh != head);
1705
1706 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 if (!buffer_mapped(bh))
1708 continue;
1709 /*
1710 * If it's a fully non-blocking write attempt and we cannot
1711 * lock the buffer then redirty the page. Note that this can
1712 * potentially cause a busy-wait loop from pdflush and kswapd
1713 * activity, but those code paths have their own higher-level
1714 * throttling.
1715 */
1716 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1717 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02001718 } else if (!trylock_buffer(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 redirty_page_for_writepage(wbc, page);
1720 continue;
1721 }
1722 if (test_clear_buffer_dirty(bh)) {
Chris Mason35c80d52009-04-15 13:22:38 -04001723 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 } else {
1725 unlock_buffer(bh);
1726 }
1727 } while ((bh = bh->b_this_page) != head);
1728
1729 /*
1730 * The page and its buffers are protected by PageWriteback(), so we can
1731 * drop the bh refcounts early.
1732 */
1733 BUG_ON(PageWriteback(page));
1734 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735
1736 do {
1737 struct buffer_head *next = bh->b_this_page;
1738 if (buffer_async_write(bh)) {
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001739 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 nr_underway++;
1741 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 bh = next;
1743 } while (bh != head);
Andrew Morton05937ba2005-05-05 16:15:47 -07001744 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
1746 err = 0;
1747done:
1748 if (nr_underway == 0) {
1749 /*
1750 * The page was marked dirty, but the buffers were
1751 * clean. Someone wrote them back by hand with
1752 * ll_rw_block/submit_bh. A rare case.
1753 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 end_page_writeback(page);
Nick Piggin3d67f2d2007-05-06 14:49:05 -07001755
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 /*
1757 * The page and buffer_heads can be released at any time from
1758 * here on.
1759 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 }
1761 return err;
1762
1763recover:
1764 /*
1765 * ENOSPC, or some other error. We may already have added some
1766 * blocks to the file, so we need to write these out to avoid
1767 * exposing stale data.
1768 * The page is currently locked and not marked for writeback
1769 */
1770 bh = head;
1771 /* Recovery: lock and submit the mapped buffers */
1772 do {
Alex Tomas29a814d2008-07-11 19:27:31 -04001773 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1774 !buffer_delay(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 lock_buffer(bh);
Chris Mason35c80d52009-04-15 13:22:38 -04001776 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 } else {
1778 /*
1779 * The buffer may have been set dirty during
1780 * attachment to a dirty page.
1781 */
1782 clear_buffer_dirty(bh);
1783 }
1784 } while ((bh = bh->b_this_page) != head);
1785 SetPageError(page);
1786 BUG_ON(PageWriteback(page));
Andrew Morton7e4c3692007-05-08 00:23:27 -07001787 mapping_set_error(page->mapping, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 do {
1790 struct buffer_head *next = bh->b_this_page;
1791 if (buffer_async_write(bh)) {
1792 clear_buffer_dirty(bh);
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001793 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 nr_underway++;
1795 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 bh = next;
1797 } while (bh != head);
Nick Pigginffda9d32007-02-20 13:57:54 -08001798 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 goto done;
1800}
1801
Nick Pigginafddba42007-10-16 01:25:01 -07001802/*
1803 * If a page has any new buffers, zero them out here, and mark them uptodate
1804 * and dirty so they'll be written out (in order to prevent uninitialised
1805 * block data from leaking). And clear the new bit.
1806 */
1807void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1808{
1809 unsigned int block_start, block_end;
1810 struct buffer_head *head, *bh;
1811
1812 BUG_ON(!PageLocked(page));
1813 if (!page_has_buffers(page))
1814 return;
1815
1816 bh = head = page_buffers(page);
1817 block_start = 0;
1818 do {
1819 block_end = block_start + bh->b_size;
1820
1821 if (buffer_new(bh)) {
1822 if (block_end > from && block_start < to) {
1823 if (!PageUptodate(page)) {
1824 unsigned start, size;
1825
1826 start = max(from, block_start);
1827 size = min(to, block_end) - start;
1828
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001829 zero_user(page, start, size);
Nick Pigginafddba42007-10-16 01:25:01 -07001830 set_buffer_uptodate(bh);
1831 }
1832
1833 clear_buffer_new(bh);
1834 mark_buffer_dirty(bh);
1835 }
1836 }
1837
1838 block_start = block_end;
1839 bh = bh->b_this_page;
1840 } while (bh != head);
1841}
1842EXPORT_SYMBOL(page_zero_new_buffers);
1843
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844static int __block_prepare_write(struct inode *inode, struct page *page,
1845 unsigned from, unsigned to, get_block_t *get_block)
1846{
1847 unsigned block_start, block_end;
1848 sector_t block;
1849 int err = 0;
1850 unsigned blocksize, bbits;
1851 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1852
1853 BUG_ON(!PageLocked(page));
1854 BUG_ON(from > PAGE_CACHE_SIZE);
1855 BUG_ON(to > PAGE_CACHE_SIZE);
1856 BUG_ON(from > to);
1857
1858 blocksize = 1 << inode->i_blkbits;
1859 if (!page_has_buffers(page))
1860 create_empty_buffers(page, blocksize, 0);
1861 head = page_buffers(page);
1862
1863 bbits = inode->i_blkbits;
1864 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1865
1866 for(bh = head, block_start = 0; bh != head || !block_start;
1867 block++, block_start=block_end, bh = bh->b_this_page) {
1868 block_end = block_start + blocksize;
1869 if (block_end <= from || block_start >= to) {
1870 if (PageUptodate(page)) {
1871 if (!buffer_uptodate(bh))
1872 set_buffer_uptodate(bh);
1873 }
1874 continue;
1875 }
1876 if (buffer_new(bh))
1877 clear_buffer_new(bh);
1878 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001879 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 err = get_block(inode, block, bh, 1);
1881 if (err)
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001882 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 if (buffer_new(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 unmap_underlying_metadata(bh->b_bdev,
1885 bh->b_blocknr);
1886 if (PageUptodate(page)) {
Nick Piggin637aff42007-10-16 01:25:00 -07001887 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 set_buffer_uptodate(bh);
Nick Piggin637aff42007-10-16 01:25:00 -07001889 mark_buffer_dirty(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 continue;
1891 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001892 if (block_end > to || block_start < from)
1893 zero_user_segments(page,
1894 to, block_end,
1895 block_start, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 continue;
1897 }
1898 }
1899 if (PageUptodate(page)) {
1900 if (!buffer_uptodate(bh))
1901 set_buffer_uptodate(bh);
1902 continue;
1903 }
1904 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
David Chinner33a266d2007-02-12 00:51:41 -08001905 !buffer_unwritten(bh) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 (block_start < from || block_end > to)) {
1907 ll_rw_block(READ, 1, &bh);
1908 *wait_bh++=bh;
1909 }
1910 }
1911 /*
1912 * If we issued read requests - let them complete.
1913 */
1914 while(wait_bh > wait) {
1915 wait_on_buffer(*--wait_bh);
1916 if (!buffer_uptodate(*wait_bh))
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001917 err = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 }
Nick Pigginafddba42007-10-16 01:25:01 -07001919 if (unlikely(err))
1920 page_zero_new_buffers(page, from, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 return err;
1922}
1923
1924static int __block_commit_write(struct inode *inode, struct page *page,
1925 unsigned from, unsigned to)
1926{
1927 unsigned block_start, block_end;
1928 int partial = 0;
1929 unsigned blocksize;
1930 struct buffer_head *bh, *head;
1931
1932 blocksize = 1 << inode->i_blkbits;
1933
1934 for(bh = head = page_buffers(page), block_start = 0;
1935 bh != head || !block_start;
1936 block_start=block_end, bh = bh->b_this_page) {
1937 block_end = block_start + blocksize;
1938 if (block_end <= from || block_start >= to) {
1939 if (!buffer_uptodate(bh))
1940 partial = 1;
1941 } else {
1942 set_buffer_uptodate(bh);
1943 mark_buffer_dirty(bh);
1944 }
Nick Pigginafddba42007-10-16 01:25:01 -07001945 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 }
1947
1948 /*
1949 * If this is a partial write which happened to make all buffers
1950 * uptodate then we can optimize away a bogus readpage() for
1951 * the next read(). Here we 'discover' whether the page went
1952 * uptodate as a result of this (potentially partial) write.
1953 */
1954 if (!partial)
1955 SetPageUptodate(page);
1956 return 0;
1957}
1958
1959/*
Nick Pigginafddba42007-10-16 01:25:01 -07001960 * block_write_begin takes care of the basic task of block allocation and
1961 * bringing partial write blocks uptodate first.
1962 *
1963 * If *pagep is not NULL, then block_write_begin uses the locked page
1964 * at *pagep rather than allocating its own. In this case, the page will
1965 * not be unlocked or deallocated on failure.
1966 */
1967int block_write_begin(struct file *file, struct address_space *mapping,
1968 loff_t pos, unsigned len, unsigned flags,
1969 struct page **pagep, void **fsdata,
1970 get_block_t *get_block)
1971{
1972 struct inode *inode = mapping->host;
1973 int status = 0;
1974 struct page *page;
1975 pgoff_t index;
1976 unsigned start, end;
1977 int ownpage = 0;
1978
1979 index = pos >> PAGE_CACHE_SHIFT;
1980 start = pos & (PAGE_CACHE_SIZE - 1);
1981 end = start + len;
1982
1983 page = *pagep;
1984 if (page == NULL) {
1985 ownpage = 1;
Nick Piggin54566b22009-01-04 12:00:53 -08001986 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Pigginafddba42007-10-16 01:25:01 -07001987 if (!page) {
1988 status = -ENOMEM;
1989 goto out;
1990 }
1991 *pagep = page;
1992 } else
1993 BUG_ON(!PageLocked(page));
1994
1995 status = __block_prepare_write(inode, page, start, end, get_block);
1996 if (unlikely(status)) {
1997 ClearPageUptodate(page);
1998
1999 if (ownpage) {
2000 unlock_page(page);
2001 page_cache_release(page);
2002 *pagep = NULL;
2003
2004 /*
2005 * prepare_write() may have instantiated a few blocks
2006 * outside i_size. Trim these off again. Don't need
2007 * i_size_read because we hold i_mutex.
2008 */
2009 if (pos + len > inode->i_size)
2010 vmtruncate(inode, inode->i_size);
2011 }
Nick Pigginafddba42007-10-16 01:25:01 -07002012 }
2013
2014out:
2015 return status;
2016}
2017EXPORT_SYMBOL(block_write_begin);
2018
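/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->write_begin built on block_write_begin().  examplefs_get_block is
 * a hypothetical mapping routine; a real one would translate iblock to
 * a disk block and allocate one when @create is set.
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	return -EIO;		/* placeholder for real mapping logic */
}

static int examplefs_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	*pagep = NULL;		/* let block_write_begin grab the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, examplefs_get_block);
}
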
2019int block_write_end(struct file *file, struct address_space *mapping,
2020 loff_t pos, unsigned len, unsigned copied,
2021 struct page *page, void *fsdata)
2022{
2023 struct inode *inode = mapping->host;
2024 unsigned start;
2025
2026 start = pos & (PAGE_CACHE_SIZE - 1);
2027
2028 if (unlikely(copied < len)) {
2029 /*
2030 * The buffers that were written will now be uptodate, so we
2031 * don't have to worry about a readpage reading them and
2032 * overwriting a partial write. However if we have encountered
2033 * a short write and only partially written into a buffer, it
2034 * will not be marked uptodate, so a readpage might come in and
2035 * destroy our partial write.
2036 *
2037 * Do the simplest thing, and just treat any short write to a
2038 * non uptodate page as a zero-length write, and force the
2039 * caller to redo the whole thing.
2040 */
2041 if (!PageUptodate(page))
2042 copied = 0;
2043
2044 page_zero_new_buffers(page, start+copied, start+len);
2045 }
2046 flush_dcache_page(page);
2047
2048 /* This could be a short (even 0-length) commit */
2049 __block_commit_write(inode, page, start, start+copied);
2050
2051 return copied;
2052}
2053EXPORT_SYMBOL(block_write_end);
2054
2055int generic_write_end(struct file *file, struct address_space *mapping,
2056 loff_t pos, unsigned len, unsigned copied,
2057 struct page *page, void *fsdata)
2058{
2059 struct inode *inode = mapping->host;
Jan Karac7d206b2008-07-11 19:27:31 -04002060 int i_size_changed = 0;
Nick Pigginafddba42007-10-16 01:25:01 -07002061
2062 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2063
2064 /*
2065 * No need to use i_size_read() here, the i_size
2066 * cannot change under us because we hold i_mutex.
2067 *
2068 * But it's important to update i_size while still holding page lock:
2069 * page writeout could otherwise come in and zero beyond i_size.
2070 */
2071 if (pos+copied > inode->i_size) {
2072 i_size_write(inode, pos+copied);
Jan Karac7d206b2008-07-11 19:27:31 -04002073 i_size_changed = 1;
Nick Pigginafddba42007-10-16 01:25:01 -07002074 }
2075
2076 unlock_page(page);
2077 page_cache_release(page);
2078
Jan Karac7d206b2008-07-11 19:27:31 -04002079 /*
2080 * Don't mark the inode dirty under page lock. First, it unnecessarily
2081 * makes the holding time of page lock longer. Second, it forces lock
2082 * ordering of page lock and transaction start for journaling
2083 * filesystems.
2084 */
2085 if (i_size_changed)
2086 mark_inode_dirty(inode);
2087
Nick Pigginafddba42007-10-16 01:25:01 -07002088 return copied;
2089}
2090EXPORT_SYMBOL(generic_write_end);
2091
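/*
 * Illustrative sketch (not part of the original file): the hypothetical
 * examplefs_write_begin above pairs directly with generic_write_end,
 * and a buffer-backed filesystem can also opt in to the partial-uptodate
 * read optimisation via the helper declared in buffer_head.h and defined
 * below.
 */
static const struct address_space_operations examplefs_aops_sketch = {
	.write_begin		= examplefs_write_begin,
	.write_end		= generic_write_end,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
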
2092/*
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002093 * block_is_partially_uptodate checks whether buffers within a page are
2094 * uptodate or not.
2095 *
2096 * Returns true if all buffers which correspond to a file portion
2097 * we want to read are uptodate.
2098 */
2099int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2100 unsigned long from)
2101{
2102 struct inode *inode = page->mapping->host;
2103 unsigned block_start, block_end, blocksize;
2104 unsigned to;
2105 struct buffer_head *bh, *head;
2106 int ret = 1;
2107
2108 if (!page_has_buffers(page))
2109 return 0;
2110
2111 blocksize = 1 << inode->i_blkbits;
2112 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2113 to = from + to;
2114 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2115 return 0;
2116
2117 head = page_buffers(page);
2118 bh = head;
2119 block_start = 0;
2120 do {
2121 block_end = block_start + blocksize;
2122 if (block_end > from && block_start < to) {
2123 if (!buffer_uptodate(bh)) {
2124 ret = 0;
2125 break;
2126 }
2127 if (block_end >= to)
2128 break;
2129 }
2130 block_start = block_end;
2131 bh = bh->b_this_page;
2132 } while (bh != head);
2133
2134 return ret;
2135}
2136EXPORT_SYMBOL(block_is_partially_uptodate);
2137
2138/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 * Generic "read page" function for block devices that have the normal
2140 * get_block functionality. This is most of the block device filesystems.
2141 * Reads the page asynchronously --- the unlock_buffer() and
2142 * set/clear_buffer_uptodate() functions propagate buffer state into the
2143 * page struct once IO has completed.
2144 */
2145int block_read_full_page(struct page *page, get_block_t *get_block)
2146{
2147 struct inode *inode = page->mapping->host;
2148 sector_t iblock, lblock;
2149 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2150 unsigned int blocksize;
2151 int nr, i;
2152 int fully_mapped = 1;
2153
Matt Mackallcd7619d2005-05-01 08:59:01 -07002154 BUG_ON(!PageLocked(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 blocksize = 1 << inode->i_blkbits;
2156 if (!page_has_buffers(page))
2157 create_empty_buffers(page, blocksize, 0);
2158 head = page_buffers(page);
2159
2160 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2161 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2162 bh = head;
2163 nr = 0;
2164 i = 0;
2165
2166 do {
2167 if (buffer_uptodate(bh))
2168 continue;
2169
2170 if (!buffer_mapped(bh)) {
Andrew Mortonc64610b2005-05-16 21:53:49 -07002171 int err = 0;
2172
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 fully_mapped = 0;
2174 if (iblock < lblock) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002175 WARN_ON(bh->b_size != blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002176 err = get_block(inode, iblock, bh, 0);
2177 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 SetPageError(page);
2179 }
2180 if (!buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002181 zero_user(page, i * blocksize, blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002182 if (!err)
2183 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 continue;
2185 }
2186 /*
2187 * get_block() might have updated the buffer
2188 * synchronously
2189 */
2190 if (buffer_uptodate(bh))
2191 continue;
2192 }
2193 arr[nr++] = bh;
2194 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2195
2196 if (fully_mapped)
2197 SetPageMappedToDisk(page);
2198
2199 if (!nr) {
2200 /*
2201 * All buffers are uptodate - we can set the page uptodate
2202 * as well. But not if get_block() returned an error.
2203 */
2204 if (!PageError(page))
2205 SetPageUptodate(page);
2206 unlock_page(page);
2207 return 0;
2208 }
2209
2210 /* Stage two: lock the buffers */
2211 for (i = 0; i < nr; i++) {
2212 bh = arr[i];
2213 lock_buffer(bh);
2214 mark_buffer_async_read(bh);
2215 }
2216
2217 /*
2218 * Stage 3: start the IO. Check for uptodateness
2219 * inside the buffer lock in case another process reading
2220 * the underlying blockdev brought it uptodate (the sct fix).
2221 */
2222 for (i = 0; i < nr; i++) {
2223 bh = arr[i];
2224 if (buffer_uptodate(bh))
2225 end_buffer_async_read(bh, 1);
2226 else
2227 submit_bh(READ, bh);
2228 }
2229 return 0;
2230}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002231EXPORT_SYMBOL(block_read_full_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
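/*
 * Illustrative sketch (not part of the original file): ->readpage for
 * the hypothetical examplefs is a one-line wrapper around the helper
 * above.
 */
static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}
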
2233/* utility function for filesystems that need to do work on expanding
Nick Piggin89e10782007-10-16 01:25:07 -07002234 * truncates. Uses filesystem pagecache writes to allow the filesystem to
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 * deal with the hole.
2236 */
Nick Piggin89e10782007-10-16 01:25:07 -07002237int generic_cont_expand_simple(struct inode *inode, loff_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238{
2239 struct address_space *mapping = inode->i_mapping;
2240 struct page *page;
Nick Piggin89e10782007-10-16 01:25:07 -07002241 void *fsdata;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002242 unsigned long limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 int err;
2244
2245 err = -EFBIG;
2246 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2247 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2248 send_sig(SIGXFSZ, current, 0);
2249 goto out;
2250 }
2251 if (size > inode->i_sb->s_maxbytes)
2252 goto out;
2253
Nick Piggin89e10782007-10-16 01:25:07 -07002254 err = pagecache_write_begin(NULL, mapping, size, 0,
2255 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2256 &page, &fsdata);
2257 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 goto out;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002259
Nick Piggin89e10782007-10-16 01:25:07 -07002260 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2261 BUG_ON(err > 0);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002262
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263out:
2264 return err;
2265}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002266EXPORT_SYMBOL(generic_cont_expand_simple);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
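/*
 * Illustrative sketch (not part of the original file): how a
 * hypothetical ->setattr path might grow a file with the helper above
 * before the generic code updates the on-disk inode.
 */
static int examplefs_grow(struct inode *inode, loff_t newsize)
{
	if (newsize <= i_size_read(inode))
		return 0;
	return generic_cont_expand_simple(inode, newsize);
}
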
Adrian Bunkf1e3af72008-04-29 00:59:01 -07002268static int cont_expand_zero(struct file *file, struct address_space *mapping,
2269 loff_t pos, loff_t *bytes)
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002270{
Nick Piggin89e10782007-10-16 01:25:07 -07002271 struct inode *inode = mapping->host;
2272 unsigned blocksize = 1 << inode->i_blkbits;
2273 struct page *page;
2274 void *fsdata;
2275 pgoff_t index, curidx;
2276 loff_t curpos;
2277 unsigned zerofrom, offset, len;
2278 int err = 0;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002279
Nick Piggin89e10782007-10-16 01:25:07 -07002280 index = pos >> PAGE_CACHE_SHIFT;
2281 offset = pos & ~PAGE_CACHE_MASK;
2282
2283 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2284 zerofrom = curpos & ~PAGE_CACHE_MASK;
2285 if (zerofrom & (blocksize-1)) {
2286 *bytes |= (blocksize-1);
2287 (*bytes)++;
2288 }
2289 len = PAGE_CACHE_SIZE - zerofrom;
2290
2291 err = pagecache_write_begin(file, mapping, curpos, len,
2292 AOP_FLAG_UNINTERRUPTIBLE,
2293 &page, &fsdata);
2294 if (err)
2295 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002296 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002297 err = pagecache_write_end(file, mapping, curpos, len, len,
2298 page, fsdata);
2299 if (err < 0)
2300 goto out;
2301 BUG_ON(err != len);
2302 err = 0;
OGAWA Hirofumi061e9742008-04-28 02:16:28 -07002303
2304 balance_dirty_pages_ratelimited(mapping);
Nick Piggin89e10782007-10-16 01:25:07 -07002305 }
2306
2307 /* page covers the boundary, find the boundary offset */
2308 if (index == curidx) {
2309 zerofrom = curpos & ~PAGE_CACHE_MASK;
2310 /* if we are expanding the file, the last block will be filled */
2311 if (offset <= zerofrom) {
2312 goto out;
2313 }
2314 if (zerofrom & (blocksize-1)) {
2315 *bytes |= (blocksize-1);
2316 (*bytes)++;
2317 }
2318 len = offset - zerofrom;
2319
2320 err = pagecache_write_begin(file, mapping, curpos, len,
2321 AOP_FLAG_UNINTERRUPTIBLE,
2322 &page, &fsdata);
2323 if (err)
2324 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002325 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002326 err = pagecache_write_end(file, mapping, curpos, len, len,
2327 page, fsdata);
2328 if (err < 0)
2329 goto out;
2330 BUG_ON(err != len);
2331 err = 0;
2332 }
2333out:
2334 return err;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002335}
2336
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337/*
2338 * For moronic filesystems that do not allow holes in files.
2339 * We may have to extend the file.
2340 */
Nick Piggin89e10782007-10-16 01:25:07 -07002341int cont_write_begin(struct file *file, struct address_space *mapping,
2342 loff_t pos, unsigned len, unsigned flags,
2343 struct page **pagep, void **fsdata,
2344 get_block_t *get_block, loff_t *bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 unsigned blocksize = 1 << inode->i_blkbits;
Nick Piggin89e10782007-10-16 01:25:07 -07002348 unsigned zerofrom;
2349 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350
Nick Piggin89e10782007-10-16 01:25:07 -07002351 err = cont_expand_zero(file, mapping, pos, bytes);
2352 if (err)
2353 goto out;
2354
2355 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2356 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2357 *bytes |= (blocksize-1);
2358 (*bytes)++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 }
2360
Nick Piggin89e10782007-10-16 01:25:07 -07002361 *pagep = NULL;
2362 err = block_write_begin(file, mapping, pos, len,
2363 flags, pagep, fsdata, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364out:
Nick Piggin89e10782007-10-16 01:25:07 -07002365 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002367EXPORT_SYMBOL(cont_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
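/*
 * Illustrative sketch (not part of the original file): a FAT-style
 * ->write_begin for a filesystem that cannot leave holes.  The contfs_*
 * names are hypothetical, mmu_private is modelled on fat's field of the
 * same name, and examplefs_get_block (sketched earlier) stands in for a
 * get_block that allocates blocks contiguously.
 */
struct contfs_inode_info {
	loff_t mmu_private;		/* file bytes initialised so far */
	struct inode vfs_inode;
};

static inline struct contfs_inode_info *CONTFS_I(struct inode *inode)
{
	return container_of(inode, struct contfs_inode_info, vfs_inode);
}

static int contfs_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, examplefs_get_block,
				&CONTFS_I(mapping->host)->mmu_private);
}
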
2369int block_prepare_write(struct page *page, unsigned from, unsigned to,
2370 get_block_t *get_block)
2371{
2372 struct inode *inode = page->mapping->host;
2373 int err = __block_prepare_write(inode, page, from, to, get_block);
2374 if (err)
2375 ClearPageUptodate(page);
2376 return err;
2377}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002378EXPORT_SYMBOL(block_prepare_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379
2380int block_commit_write(struct page *page, unsigned from, unsigned to)
2381{
2382 struct inode *inode = page->mapping->host;
2383 __block_commit_write(inode,page,from,to);
2384 return 0;
2385}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002386EXPORT_SYMBOL(block_commit_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
David Chinner54171692007-07-19 17:39:55 +10002388/*
2389 * block_page_mkwrite() is not allowed to change the file size as it gets
2390 * called from a page fault handler when a page is first dirtied. Hence we must
2391 * be careful to check for EOF conditions here. We set the page up correctly
2392 * for a written page which means we get ENOSPC checking when writing into
2393 * holes and correct delalloc and unwritten extent mapping on filesystems that
2394 * support these features.
2395 *
2396 * We are not allowed to take the i_mutex here so we have to play games to
2397 * protect against truncate races as the page could now be beyond EOF. Because
2398 * vmtruncate() writes the inode size before removing pages, once we have the
2399 * page lock we can determine safely if the page is beyond EOF. If it is not
2400 * beyond EOF, then the page is guaranteed safe against truncation until we
2401 * unlock the page.
2402 */
2403int
Nick Pigginc2ec1752009-03-31 15:23:21 -07002404block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
David Chinner54171692007-07-19 17:39:55 +10002405 get_block_t get_block)
2406{
Nick Pigginc2ec1752009-03-31 15:23:21 -07002407 struct page *page = vmf->page;
David Chinner54171692007-07-19 17:39:55 +10002408 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2409 unsigned long end;
2410 loff_t size;
Nick Piggin56a76f82009-03-31 15:23:23 -07002411 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
David Chinner54171692007-07-19 17:39:55 +10002412
2413 lock_page(page);
2414 size = i_size_read(inode);
2415 if ((page->mapping != inode->i_mapping) ||
Nick Piggin18336332007-07-20 00:31:45 -07002416 (page_offset(page) > size)) {
David Chinner54171692007-07-19 17:39:55 +10002417 /* page got truncated out from underneath us */
Nick Pigginb827e492009-04-30 15:08:16 -07002418 unlock_page(page);
2419 goto out;
David Chinner54171692007-07-19 17:39:55 +10002420 }
2421
2422 /* page is wholly or partially inside EOF */
2423 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2424 end = size & ~PAGE_CACHE_MASK;
2425 else
2426 end = PAGE_CACHE_SIZE;
2427
2428 ret = block_prepare_write(page, 0, end, get_block);
2429 if (!ret)
2430 ret = block_commit_write(page, 0, end);
2431
Nick Piggin56a76f82009-03-31 15:23:23 -07002432 if (unlikely(ret)) {
Nick Pigginb827e492009-04-30 15:08:16 -07002433 unlock_page(page);
Nick Piggin56a76f82009-03-31 15:23:23 -07002434 if (ret == -ENOMEM)
2435 ret = VM_FAULT_OOM;
2436 else /* -ENOSPC, -EIO, etc */
2437 ret = VM_FAULT_SIGBUS;
Nick Pigginb827e492009-04-30 15:08:16 -07002438 } else
2439 ret = VM_FAULT_LOCKED;
Nick Pigginc2ec1752009-03-31 15:23:21 -07002440
Nick Pigginb827e492009-04-30 15:08:16 -07002441out:
David Chinner54171692007-07-19 17:39:55 +10002442 return ret;
2443}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002444EXPORT_SYMBOL(block_page_mkwrite);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445
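/*
 * Illustrative sketch (not part of the original file): wiring
 * block_page_mkwrite() into a file's vm_operations so writable mmap
 * faults go through get_block, again using the hypothetical
 * examplefs_get_block.
 */
static int examplefs_page_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, examplefs_get_block);
}

static const struct vm_operations_struct examplefs_vm_ops_sketch = {
	.fault		= filemap_fault,
	.page_mkwrite	= examplefs_page_mkwrite,
};
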
2446/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002447 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 * immediately, while under the page lock. So it needs a special end_io
2449 * handler which does not touch the bh after unlocking it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 */
2451static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2452{
Dmitry Monakhov68671f32007-10-16 01:24:47 -07002453 __end_buffer_read_notouch(bh, uptodate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454}
2455
2456/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002457 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2458 * the page (converting it to circular linked list and taking care of page
2459 * dirty races).
2460 */
2461static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2462{
2463 struct buffer_head *bh;
2464
2465 BUG_ON(!PageLocked(page));
2466
2467 spin_lock(&page->mapping->private_lock);
2468 bh = head;
2469 do {
2470 if (PageDirty(page))
2471 set_buffer_dirty(bh);
2472 if (!bh->b_this_page)
2473 bh->b_this_page = head;
2474 bh = bh->b_this_page;
2475 } while (bh != head);
2476 attach_page_buffers(page, head);
2477 spin_unlock(&page->mapping->private_lock);
2478}
2479
2480/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 * On entry, the page is fully not uptodate.
2482 * On exit the page is fully uptodate in the areas outside (from,to)
2483 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002484int nobh_write_begin(struct file *file, struct address_space *mapping,
2485 loff_t pos, unsigned len, unsigned flags,
2486 struct page **pagep, void **fsdata,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 get_block_t *get_block)
2488{
Nick Piggin03158cd2007-10-16 01:25:25 -07002489 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490 const unsigned blkbits = inode->i_blkbits;
2491 const unsigned blocksize = 1 << blkbits;
Nick Piggina4b06722007-10-16 01:24:48 -07002492 struct buffer_head *head, *bh;
Nick Piggin03158cd2007-10-16 01:25:25 -07002493 struct page *page;
2494 pgoff_t index;
2495 unsigned from, to;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 unsigned block_in_page;
Nick Piggina4b06722007-10-16 01:24:48 -07002497 unsigned block_start, block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 sector_t block_in_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 int nr_reads = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 int ret = 0;
2501 int is_mapped_to_disk = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
Nick Piggin03158cd2007-10-16 01:25:25 -07002503 index = pos >> PAGE_CACHE_SHIFT;
2504 from = pos & (PAGE_CACHE_SIZE - 1);
2505 to = from + len;
2506
Nick Piggin54566b22009-01-04 12:00:53 -08002507 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Piggin03158cd2007-10-16 01:25:25 -07002508 if (!page)
2509 return -ENOMEM;
2510 *pagep = page;
2511 *fsdata = NULL;
2512
2513 if (page_has_buffers(page)) {
2514 unlock_page(page);
2515 page_cache_release(page);
2516 *pagep = NULL;
2517 return block_write_begin(file, mapping, pos, len, flags, pagep,
2518 fsdata, get_block);
2519 }
Nick Piggina4b06722007-10-16 01:24:48 -07002520
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 if (PageMappedToDisk(page))
2522 return 0;
2523
Nick Piggina4b06722007-10-16 01:24:48 -07002524 /*
2525 * Allocate buffers so that we can keep track of state, and potentially
2526 * attach them to the page if an error occurs. In the common case of
2527 * no error, they will just be freed again without ever being attached
2528 * to the page (which is all OK, because we're under the page lock).
2529 *
2530 * Be careful: the buffer linked list is a NULL terminated one, rather
2531 * than the circular one we're used to.
2532 */
2533 head = alloc_page_buffers(page, blocksize, 0);
Nick Piggin03158cd2007-10-16 01:25:25 -07002534 if (!head) {
2535 ret = -ENOMEM;
2536 goto out_release;
2537 }
Nick Piggina4b06722007-10-16 01:24:48 -07002538
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540
2541 /*
2542 * We loop across all blocks in the page, whether or not they are
2543 * part of the affected region. This is so we can discover if the
2544 * page is fully mapped-to-disk.
2545 */
Nick Piggina4b06722007-10-16 01:24:48 -07002546 for (block_start = 0, block_in_page = 0, bh = head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 block_start < PAGE_CACHE_SIZE;
Nick Piggina4b06722007-10-16 01:24:48 -07002548 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 int create;
2550
Nick Piggina4b06722007-10-16 01:24:48 -07002551 block_end = block_start + blocksize;
2552 bh->b_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 create = 1;
2554 if (block_start >= to)
2555 create = 0;
2556 ret = get_block(inode, block_in_file + block_in_page,
Nick Piggina4b06722007-10-16 01:24:48 -07002557 bh, create);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558 if (ret)
2559 goto failed;
Nick Piggina4b06722007-10-16 01:24:48 -07002560 if (!buffer_mapped(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 is_mapped_to_disk = 0;
Nick Piggina4b06722007-10-16 01:24:48 -07002562 if (buffer_new(bh))
2563 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2564 if (PageUptodate(page)) {
2565 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 continue;
Nick Piggina4b06722007-10-16 01:24:48 -07002567 }
2568 if (buffer_new(bh) || !buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002569 zero_user_segments(page, block_start, from,
2570 to, block_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 continue;
2572 }
Nick Piggina4b06722007-10-16 01:24:48 -07002573 if (buffer_uptodate(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 continue; /* reiserfs does this */
2575 if (block_start < from || block_end > to) {
Nick Piggina4b06722007-10-16 01:24:48 -07002576 lock_buffer(bh);
2577 bh->b_end_io = end_buffer_read_nobh;
2578 submit_bh(READ, bh);
2579 nr_reads++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 }
2581 }
2582
2583 if (nr_reads) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 /*
2585 * The page is locked, so these buffers are protected from
2586 * any VM or truncate activity. Hence we don't need to care
2587 * for the buffer_head refcounts.
2588 */
Nick Piggina4b06722007-10-16 01:24:48 -07002589 for (bh = head; bh; bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 wait_on_buffer(bh);
2591 if (!buffer_uptodate(bh))
2592 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 }
2594 if (ret)
2595 goto failed;
2596 }
2597
2598 if (is_mapped_to_disk)
2599 SetPageMappedToDisk(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600
Nick Piggin03158cd2007-10-16 01:25:25 -07002601 *fsdata = head; /* to be released by nobh_write_end */
Nick Piggina4b06722007-10-16 01:24:48 -07002602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 return 0;
2604
2605failed:
Nick Piggin03158cd2007-10-16 01:25:25 -07002606 BUG_ON(!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 /*
Nick Piggina4b06722007-10-16 01:24:48 -07002608 * Error recovery is a bit difficult. We need to zero out blocks that
2609 * were newly allocated, and dirty them to ensure they get written out.
2610 * Buffers need to be attached to the page at this point, otherwise
2611 * the handling of potential IO errors during writeout would be hard
2612 * (could try doing synchronous writeout, but what if that fails too?)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002614 attach_nobh_buffers(page, head);
2615 page_zero_new_buffers(page, from, to);
Nick Piggina4b06722007-10-16 01:24:48 -07002616
Nick Piggin03158cd2007-10-16 01:25:25 -07002617out_release:
2618 unlock_page(page);
2619 page_cache_release(page);
2620 *pagep = NULL;
Nick Piggina4b06722007-10-16 01:24:48 -07002621
Nick Piggin03158cd2007-10-16 01:25:25 -07002622 if (pos + len > inode->i_size)
2623 vmtruncate(inode, inode->i_size);
Nick Piggina4b06722007-10-16 01:24:48 -07002624
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 return ret;
2626}
Nick Piggin03158cd2007-10-16 01:25:25 -07002627EXPORT_SYMBOL(nobh_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628
Nick Piggin03158cd2007-10-16 01:25:25 -07002629int nobh_write_end(struct file *file, struct address_space *mapping,
2630 loff_t pos, unsigned len, unsigned copied,
2631 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632{
2633 struct inode *inode = page->mapping->host;
Nick Pigginefdc3132007-10-21 06:57:41 +02002634 struct buffer_head *head = fsdata;
Nick Piggin03158cd2007-10-16 01:25:25 -07002635 struct buffer_head *bh;
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002636 BUG_ON(fsdata != NULL && page_has_buffers(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637
Dave Kleikampd4cf1092009-02-06 14:59:26 -06002638 if (unlikely(copied < len) && head)
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002639 attach_nobh_buffers(page, head);
2640 if (page_has_buffers(page))
2641 return generic_write_end(file, mapping, pos, len,
2642 copied, page, fsdata);
Nick Piggina4b06722007-10-16 01:24:48 -07002643
Nick Piggin22c8ca72007-02-20 13:58:09 -08002644 SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 set_page_dirty(page);
Nick Piggin03158cd2007-10-16 01:25:25 -07002646 if (pos+copied > inode->i_size) {
2647 i_size_write(inode, pos+copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 mark_inode_dirty(inode);
2649 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002650
2651 unlock_page(page);
2652 page_cache_release(page);
2653
Nick Piggin03158cd2007-10-16 01:25:25 -07002654 while (head) {
2655 bh = head;
2656 head = head->b_this_page;
2657 free_buffer_head(bh);
2658 }
2659
2660 return copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661}
Nick Piggin03158cd2007-10-16 01:25:25 -07002662EXPORT_SYMBOL(nobh_write_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663
2664/*
2665 * nobh_writepage() - based on block_write_full_page() except
2666 * that it tries to operate without attaching bufferheads to
2667 * the page.
2668 */
2669int nobh_writepage(struct page *page, get_block_t *get_block,
2670 struct writeback_control *wbc)
2671{
2672 struct inode * const inode = page->mapping->host;
2673 loff_t i_size = i_size_read(inode);
2674 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2675 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676 int ret;
2677
2678 /* Is the page fully inside i_size? */
2679 if (page->index < end_index)
2680 goto out;
2681
2682 /* Is the page fully outside i_size? (truncate in progress) */
2683 offset = i_size & (PAGE_CACHE_SIZE-1);
2684 if (page->index >= end_index+1 || !offset) {
2685 /*
2686 * The page may have dirty, unmapped buffers. For example,
2687 * they may have been added in ext3_writepage(). Make them
2688 * freeable here, so the page does not leak.
2689 */
2690#if 0
2691 /* Not really sure about this - do we need this ? */
2692 if (page->mapping->a_ops->invalidatepage)
2693 page->mapping->a_ops->invalidatepage(page, offset);
2694#endif
2695 unlock_page(page);
2696 return 0; /* don't care */
2697 }
2698
2699 /*
2700 * The page straddles i_size. It must be zeroed out on each and every
2701 * writepage invocation because it may be mmapped. "A file is mapped
2702 * in multiples of the page size. For a file that is not a multiple of
2703 * the page size, the remaining memory is zeroed when mapped, and
2704 * writes to that region are not written out to the file."
2705 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002706 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707out:
2708 ret = mpage_writepage(page, get_block, wbc);
2709 if (ret == -EAGAIN)
Chris Mason35c80d52009-04-15 13:22:38 -04002710 ret = __block_write_full_page(inode, page, get_block, wbc,
2711 end_buffer_async_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 return ret;
2713}
2714EXPORT_SYMBOL(nobh_writepage);
2715
Nick Piggin03158cd2007-10-16 01:25:25 -07002716int nobh_truncate_page(struct address_space *mapping,
2717 loff_t from, get_block_t *get_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2720 unsigned offset = from & (PAGE_CACHE_SIZE-1);
Nick Piggin03158cd2007-10-16 01:25:25 -07002721 unsigned blocksize;
2722 sector_t iblock;
2723 unsigned length, pos;
2724 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 struct page *page;
Nick Piggin03158cd2007-10-16 01:25:25 -07002726 struct buffer_head map_bh;
2727 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728
Nick Piggin03158cd2007-10-16 01:25:25 -07002729 blocksize = 1 << inode->i_blkbits;
2730 length = offset & (blocksize - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731
Nick Piggin03158cd2007-10-16 01:25:25 -07002732 /* Block boundary? Nothing to do */
2733 if (!length)
2734 return 0;
2735
2736 length = blocksize - length;
2737 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2738
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 page = grab_cache_page(mapping, index);
Nick Piggin03158cd2007-10-16 01:25:25 -07002740 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 if (!page)
2742 goto out;
2743
Nick Piggin03158cd2007-10-16 01:25:25 -07002744 if (page_has_buffers(page)) {
2745has_buffers:
2746 unlock_page(page);
2747 page_cache_release(page);
2748 return block_truncate_page(mapping, from, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002750
2751 /* Find the buffer that contains "offset" */
2752 pos = blocksize;
2753 while (offset >= pos) {
2754 iblock++;
2755 pos += blocksize;
2756 }
2757
Theodore Ts'o460bcf52009-05-12 07:37:56 -04002758 map_bh.b_size = blocksize;
2759 map_bh.b_state = 0;
Nick Piggin03158cd2007-10-16 01:25:25 -07002760 err = get_block(inode, iblock, &map_bh, 0);
2761 if (err)
2762 goto unlock;
2763 /* unmapped? It's a hole - nothing to do */
2764 if (!buffer_mapped(&map_bh))
2765 goto unlock;
2766
2767 /* Ok, it's mapped. Make sure it's up-to-date */
2768 if (!PageUptodate(page)) {
2769 err = mapping->a_ops->readpage(NULL, page);
2770 if (err) {
2771 page_cache_release(page);
2772 goto out;
2773 }
2774 lock_page(page);
2775 if (!PageUptodate(page)) {
2776 err = -EIO;
2777 goto unlock;
2778 }
2779 if (page_has_buffers(page))
2780 goto has_buffers;
2781 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002782 zero_user(page, offset, length);
Nick Piggin03158cd2007-10-16 01:25:25 -07002783 set_page_dirty(page);
2784 err = 0;
2785
2786unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 unlock_page(page);
2788 page_cache_release(page);
2789out:
Nick Piggin03158cd2007-10-16 01:25:25 -07002790 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791}
2792EXPORT_SYMBOL(nobh_truncate_page);
2793
2794int block_truncate_page(struct address_space *mapping,
2795 loff_t from, get_block_t *get_block)
2796{
2797 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2798 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2799 unsigned blocksize;
Andrew Morton54b21a72006-01-08 01:03:05 -08002800 sector_t iblock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 unsigned length, pos;
2802 struct inode *inode = mapping->host;
2803 struct page *page;
2804 struct buffer_head *bh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 int err;
2806
2807 blocksize = 1 << inode->i_blkbits;
2808 length = offset & (blocksize - 1);
2809
2810 /* Block boundary? Nothing to do */
2811 if (!length)
2812 return 0;
2813
2814 length = blocksize - length;
Andrew Morton54b21a72006-01-08 01:03:05 -08002815 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
2817 page = grab_cache_page(mapping, index);
2818 err = -ENOMEM;
2819 if (!page)
2820 goto out;
2821
2822 if (!page_has_buffers(page))
2823 create_empty_buffers(page, blocksize, 0);
2824
2825 /* Find the buffer that contains "offset" */
2826 bh = page_buffers(page);
2827 pos = blocksize;
2828 while (offset >= pos) {
2829 bh = bh->b_this_page;
2830 iblock++;
2831 pos += blocksize;
2832 }
2833
2834 err = 0;
2835 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002836 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 err = get_block(inode, iblock, bh, 0);
2838 if (err)
2839 goto unlock;
2840 /* unmapped? It's a hole - nothing to do */
2841 if (!buffer_mapped(bh))
2842 goto unlock;
2843 }
2844
2845 /* Ok, it's mapped. Make sure it's up-to-date */
2846 if (PageUptodate(page))
2847 set_buffer_uptodate(bh);
2848
David Chinner33a266d2007-02-12 00:51:41 -08002849 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 err = -EIO;
2851 ll_rw_block(READ, 1, &bh);
2852 wait_on_buffer(bh);
2853 /* Uhhuh. Read error. Complain and punt. */
2854 if (!buffer_uptodate(bh))
2855 goto unlock;
2856 }
2857
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002858 zero_user(page, offset, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 mark_buffer_dirty(bh);
2860 err = 0;
2861
2862unlock:
2863 unlock_page(page);
2864 page_cache_release(page);
2865out:
2866 return err;
2867}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002868EXPORT_SYMBOL(block_truncate_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869
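/*
 * Illustrative sketch (not part of the original file): the usual
 * shrink-side truncate step - zero the tail of the final partial block
 * before the blocks beyond the new size are freed.
 */
static int examplefs_truncate_tail(struct inode *inode, loff_t newsize)
{
	return block_truncate_page(inode->i_mapping, newsize,
				   examplefs_get_block);
}
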
2870/*
2871 * The generic ->writepage function for buffer-backed address_spaces
Chris Mason35c80d52009-04-15 13:22:38 -04002872 * this form passes in the end_io handler used to finish the IO.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873 */
Chris Mason35c80d52009-04-15 13:22:38 -04002874int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2875 struct writeback_control *wbc, bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876{
2877 struct inode * const inode = page->mapping->host;
2878 loff_t i_size = i_size_read(inode);
2879 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2880 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881
2882 /* Is the page fully inside i_size? */
2883 if (page->index < end_index)
Chris Mason35c80d52009-04-15 13:22:38 -04002884 return __block_write_full_page(inode, page, get_block, wbc,
2885 handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886
2887 /* Is the page fully outside i_size? (truncate in progress) */
2888 offset = i_size & (PAGE_CACHE_SIZE-1);
2889 if (page->index >= end_index+1 || !offset) {
2890 /*
2891 * The page may have dirty, unmapped buffers. For example,
2892 * they may have been added in ext3_writepage(). Make them
2893 * freeable here, so the page does not leak.
2894 */
Jan Karaaaa40592005-10-30 15:00:16 -08002895 do_invalidatepage(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 unlock_page(page);
2897 return 0; /* don't care */
2898 }
2899
2900 /*
2901 * The page straddles i_size. It must be zeroed out on each and every
2902 * writepage invocation because it may be mmapped. "A file is mapped
2903 * in multiples of the page size. For a file that is not a multiple of
2904 * the page size, the remaining memory is zeroed when mapped, and
2905 * writes to that region are not written out to the file."
2906 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002907 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Chris Mason35c80d52009-04-15 13:22:38 -04002908 return __block_write_full_page(inode, page, get_block, wbc, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002910EXPORT_SYMBOL(block_write_full_page_endio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911
Chris Mason35c80d52009-04-15 13:22:38 -04002912/*
2913 * The generic ->writepage function for buffer-backed address_spaces
2914 */
2915int block_write_full_page(struct page *page, get_block_t *get_block,
2916 struct writeback_control *wbc)
2917{
2918 return block_write_full_page_endio(page, get_block, wbc,
2919 end_buffer_async_write);
2920}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002921EXPORT_SYMBOL(block_write_full_page);
Chris Mason35c80d52009-04-15 13:22:38 -04002922
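/*
 * Illustrative sketch (not part of the original file): ->writepage for
 * the hypothetical examplefs.
 */
static int examplefs_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return block_write_full_page(page, examplefs_get_block, wbc);
}
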
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2924 get_block_t *get_block)
2925{
2926 struct buffer_head tmp;
2927 struct inode *inode = mapping->host;
2928 tmp.b_state = 0;
2929 tmp.b_blocknr = 0;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002930 tmp.b_size = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 get_block(inode, block, &tmp, 0);
2932 return tmp.b_blocknr;
2933}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002934EXPORT_SYMBOL(generic_block_bmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935
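/*
 * Illustrative sketch (not part of the original file): ->bmap, as used
 * by the FIBMAP ioctl, for the hypothetical examplefs.
 */
static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, examplefs_get_block);
}
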
NeilBrown6712ecf2007-09-27 12:47:43 +02002936static void end_bio_bh_io_sync(struct bio *bio, int err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937{
2938 struct buffer_head *bh = bio->bi_private;
2939
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 if (err == -EOPNOTSUPP) {
2941 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2942 set_bit(BH_Eopnotsupp, &bh->b_state);
2943 }
2944
Keith Mannthey08bafc02008-11-25 10:24:35 +01002945 if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2946 set_bit(BH_Quiet, &bh->b_state);
2947
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2949 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950}
2951
2952int submit_bh(int rw, struct buffer_head * bh)
2953{
2954 struct bio *bio;
2955 int ret = 0;
2956
2957 BUG_ON(!buffer_locked(bh));
2958 BUG_ON(!buffer_mapped(bh));
2959 BUG_ON(!bh->b_end_io);
Aneesh Kumar K.V8fb0e342009-05-12 16:22:37 -04002960 BUG_ON(buffer_delay(bh));
2961 BUG_ON(buffer_unwritten(bh));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962
Jens Axboe48fd4f92008-08-22 10:00:36 +02002963 /*
2964 * Mask in barrier bit for a write (could be either a WRITE or a
2965 * WRITE_SYNC)
2966 */
2967 if (buffer_ordered(bh) && (rw & WRITE))
2968 rw |= WRITE_BARRIER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969
2970 /*
Jens Axboe48fd4f92008-08-22 10:00:36 +02002971 * Only clear out a write error when rewriting
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 */
Jens Axboe48fd4f92008-08-22 10:00:36 +02002973 if (test_set_buffer_req(bh) && (rw & WRITE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 clear_buffer_write_io_error(bh);
2975
2976 /*
2977 * from here on down, it's all bio -- do the initial mapping,
2978 * submit_bio -> generic_make_request may further map this bio around
2979 */
2980 bio = bio_alloc(GFP_NOIO, 1);
2981
2982 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2983 bio->bi_bdev = bh->b_bdev;
2984 bio->bi_io_vec[0].bv_page = bh->b_page;
2985 bio->bi_io_vec[0].bv_len = bh->b_size;
2986 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2987
2988 bio->bi_vcnt = 1;
2989 bio->bi_idx = 0;
2990 bio->bi_size = bh->b_size;
2991
2992 bio->bi_end_io = end_bio_bh_io_sync;
2993 bio->bi_private = bh;
2994
2995 bio_get(bio);
2996 submit_bio(rw, bio);
2997
2998 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2999 ret = -EOPNOTSUPP;
3000
3001 bio_put(bio);
3002 return ret;
3003}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07003004EXPORT_SYMBOL(submit_bh);
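
/*
 * Illustrative helper, not from this file, showing the submit_bh()
 * calling convention: the caller locks the buffer, takes a reference and
 * sets b_end_io before submitting; end_buffer_read_sync() then unlocks
 * the buffer and drops that reference on completion.
 */
static int example_read_bh(struct buffer_head *bh)
{
        lock_buffer(bh);
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}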
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005
3006/**
3007 * ll_rw_block: low-level access to block devices (DEPRECATED)
Jan Karaa7662232005-09-06 15:19:10 -07003008 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 * @nr: number of &struct buffer_heads in the array
3010 * @bhs: array of pointers to &struct buffer_head
3011 *
Jan Karaa7662232005-09-06 15:19:10 -07003012 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3013 * requests an I/O operation on them, either a %READ or a %WRITE. The third
3014 * option, %SWRITE, is like %WRITE except that it ensures the *current* data
3015 * in the buffers is sent to disk. The fourth option, %READA, is described in
3016 * the documentation for generic_make_request(), which ll_rw_block() calls.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017 *
3018 * This function drops any buffer that it cannot get a lock on (with the
Jan Karaa7662232005-09-06 15:19:10 -07003019 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3020 * clean when doing a write request, and any buffer that appears to be
3021 * up-to-date when doing a read request. Further, it marks as clean buffers that
3022 * are processed for writing (the buffer cache won't assume that they are
3023 * actually clean until the buffer gets unlocked).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 *
3025 * ll_rw_block sets b_end_io to a simple completion handler that marks
3026 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3027 * any waiters.
3028 *
3029 * All of the buffers must be for the same device, and their size must be a
3030 * multiple of the currently approved block size for the device.
3031 */
3032void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3033{
3034 int i;
3035
3036 for (i = 0; i < nr; i++) {
3037 struct buffer_head *bh = bhs[i];
3038
Jens Axboe9cf6b722009-04-06 14:48:03 +02003039 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
Jan Karaa7662232005-09-06 15:19:10 -07003040 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02003041 else if (!trylock_buffer(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 continue;
3043
Jens Axboe9cf6b722009-04-06 14:48:03 +02003044 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3045 rw == SWRITE_SYNC_PLUG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046 if (test_clear_buffer_dirty(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07003047 bh->b_end_io = end_buffer_write_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08003048 get_bh(bh);
Jens Axboe18ce3752008-07-01 09:07:34 +02003049 if (rw == SWRITE_SYNC)
3050 submit_bh(WRITE_SYNC, bh);
3051 else
3052 submit_bh(WRITE, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 continue;
3054 }
3055 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 if (!buffer_uptodate(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07003057 bh->b_end_io = end_buffer_read_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08003058 get_bh(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 submit_bh(rw, bh);
3060 continue;
3061 }
3062 }
3063 unlock_buffer(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 }
3065}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07003066EXPORT_SYMBOL(ll_rw_block);
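
/*
 * Usage sketch in the spirit of __breadahead(): start I/O on a batch of
 * buffers but wait only for the one needed right now.  Buffers that
 * ll_rw_block() could not lock, or that were already up to date, are
 * simply skipped, which is why the result is checked afterwards.
 */
static int example_read_batch(struct buffer_head *bh_now,
                              struct buffer_head *bh_ahead)
{
        struct buffer_head *bhs[] = { bh_now, bh_ahead };

        ll_rw_block(READ, 2, bhs);
        wait_on_buffer(bh_now);
        return buffer_uptodate(bh_now) ? 0 : -EIO;
}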
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067
3068/*
3069 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3070 * and then start new I/O and then wait upon it. The caller must have a ref on
3071 * the buffer_head.
3072 */
3073int sync_dirty_buffer(struct buffer_head *bh)
3074{
3075 int ret = 0;
3076
3077 WARN_ON(atomic_read(&bh->b_count) < 1);
3078 lock_buffer(bh);
3079 if (test_clear_buffer_dirty(bh)) {
3080 get_bh(bh);
3081 bh->b_end_io = end_buffer_write_sync;
Jens Axboe1aa2a7c2009-04-06 14:48:08 +02003082 ret = submit_bh(WRITE_SYNC, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 wait_on_buffer(bh);
3084 if (buffer_eopnotsupp(bh)) {
3085 clear_buffer_eopnotsupp(bh);
3086 ret = -EOPNOTSUPP;
3087 }
3088 if (!ret && !buffer_uptodate(bh))
3089 ret = -EIO;
3090 } else {
3091 unlock_buffer(bh);
3092 }
3093 return ret;
3094}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07003095EXPORT_SYMBOL(sync_dirty_buffer);
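
/*
 * Sketch of the contract stated above: the buffer must be pinned across
 * the call.  Here a reference is taken locally for clarity; real callers
 * (e.g. journalling code) usually hold one already.
 */
static int example_flush_bh(struct buffer_head *bh)
{
        int err;

        get_bh(bh);                     /* the required reference */
        err = sync_dirty_buffer(bh);
        brelse(bh);
        return err;
}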
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096
3097/*
3098 * try_to_free_buffers() checks if all the buffers on this particular page
3099 * are unused, and releases them if so.
3100 *
3101 * Exclusion against try_to_free_buffers may be obtained by either
3102 * locking the page or by holding its mapping's private_lock.
3103 *
3104 * If the page is dirty but all the buffers are clean then we need to
3105 * be sure to mark the page clean as well. This is because the page
3106 * may be against a block device, and a later reattachment of buffers
3107 * to a dirty page will set *all* buffers dirty, which would corrupt
3108 * filesystem data on the same device.
3109 *
3110 * The same applies to regular filesystem pages: if all the buffers are
3111 * clean then we set the page clean and proceed. To do that, we require
3112 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3113 * private_lock.
3114 *
3115 * try_to_free_buffers() is non-blocking.
3116 */
3117static inline int buffer_busy(struct buffer_head *bh)
3118{
3119 return atomic_read(&bh->b_count) |
3120 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3121}
3122
3123static int
3124drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3125{
3126 struct buffer_head *head = page_buffers(page);
3127 struct buffer_head *bh;
3128
3129 bh = head;
3130 do {
akpm@osdl.orgde7d5a32005-05-01 08:58:39 -07003131 if (buffer_write_io_error(bh) && page->mapping)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132 set_bit(AS_EIO, &page->mapping->flags);
3133 if (buffer_busy(bh))
3134 goto failed;
3135 bh = bh->b_this_page;
3136 } while (bh != head);
3137
3138 do {
3139 struct buffer_head *next = bh->b_this_page;
3140
Jan Kara535ee2f2008-02-08 04:21:59 -08003141 if (bh->b_assoc_map)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 __remove_assoc_queue(bh);
3143 bh = next;
3144 } while (bh != head);
3145 *buffers_to_free = head;
3146 __clear_page_buffers(page);
3147 return 1;
3148failed:
3149 return 0;
3150}
3151
3152int try_to_free_buffers(struct page *page)
3153{
3154 struct address_space * const mapping = page->mapping;
3155 struct buffer_head *buffers_to_free = NULL;
3156 int ret = 0;
3157
3158 BUG_ON(!PageLocked(page));
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003159 if (PageWriteback(page))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160 return 0;
3161
3162 if (mapping == NULL) { /* can this still happen? */
3163 ret = drop_buffers(page, &buffers_to_free);
3164 goto out;
3165 }
3166
3167 spin_lock(&mapping->private_lock);
3168 ret = drop_buffers(page, &buffers_to_free);
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003169
3170 /*
3171 * If the filesystem writes its buffers by hand (eg ext3)
3172 * then we can have clean buffers against a dirty page. We
3173 * clean the page here; otherwise the VM will never notice
3174 * that the filesystem did any IO at all.
3175 *
3176 * Also, during truncate, discard_buffer will have marked all
3177 * the page's buffers clean. We discover that here and clean
3178 * the page also.
Nick Piggin87df7242007-01-30 14:36:27 +11003179 *
3180 * private_lock must be held over this entire operation in order
3181 * to synchronise against __set_page_dirty_buffers and prevent the
3182 * dirty bit from being lost.
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003183 */
3184 if (ret)
3185 cancel_dirty_page(page, PAGE_CACHE_SIZE);
Nick Piggin87df7242007-01-30 14:36:27 +11003186 spin_unlock(&mapping->private_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187out:
3188 if (buffers_to_free) {
3189 struct buffer_head *bh = buffers_to_free;
3190
3191 do {
3192 struct buffer_head *next = bh->b_this_page;
3193 free_buffer_head(bh);
3194 bh = next;
3195 } while (bh != buffers_to_free);
3196 }
3197 return ret;
3198}
3199EXPORT_SYMBOL(try_to_free_buffers);
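
/*
 * Sketch, assuming a hypothetical "myfs": for simple buffer-backed
 * filesystems the address_space ->releasepage hook often reduces to this
 * call (the gfp mask is not used by try_to_free_buffers() itself).
 */
static int myfs_releasepage(struct page *page, gfp_t gfp)
{
        return try_to_free_buffers(page);
}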
3200
NeilBrown3978d712006-03-26 01:37:17 -08003201void block_sync_page(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202{
3203 struct address_space *mapping;
3204
3205 smp_mb();
3206 mapping = page_mapping(page);
3207 if (mapping)
3208 blk_run_backing_dev(mapping->backing_dev_info, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07003210EXPORT_SYMBOL(block_sync_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211
3212/*
3213 * There are no bdflush tunables left. But distributions are
3214 * still running obsolete flush daemons, so we terminate them here.
3215 *
3216 * Use of bdflush() is deprecated and will be removed in a future kernel.
3217 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3218 */
Heiko Carstensbdc480e2009-01-14 14:14:12 +01003219SYSCALL_DEFINE2(bdflush, int, func, long, data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220{
3221 static int msg_count;
3222
3223 if (!capable(CAP_SYS_ADMIN))
3224 return -EPERM;
3225
3226 if (msg_count < 5) {
3227 msg_count++;
3228 printk(KERN_INFO
3229 "warning: process `%s' used the obsolete bdflush"
3230 " system call\n", current->comm);
3231 printk(KERN_INFO "Fix your initscripts?\n");
3232 }
3233
3234 if (func == 1)
3235 do_exit(0);
3236 return 0;
3237}
3238
3239/*
3240 * Buffer-head allocation
3241 */
Christoph Lametere18b8902006-12-06 20:33:20 -08003242static struct kmem_cache *bh_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243
3244/*
3245 * Once the number of bh's in the machine exceeds this level, we start
3246 * stripping them in writeback.
3247 */
3248static int max_buffer_heads;
3249
3250int buffer_heads_over_limit;
3251
3252struct bh_accounting {
3253 int nr; /* Number of live bh's */
3254 int ratelimit; /* Limit cacheline bouncing */
3255};
3256
3257static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3258
3259static void recalc_bh_state(void)
3260{
3261 int i;
3262 int tot = 0;
3263
3264 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3265 return;
3266 __get_cpu_var(bh_accounting).ratelimit = 0;
Eric Dumazet8a143422006-03-24 03:18:10 -08003267 for_each_online_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003268 tot += per_cpu(bh_accounting, i).nr;
3269 buffer_heads_over_limit = (tot > max_buffer_heads);
3270}
3271
Al Virodd0fc662005-10-07 07:46:04 +01003272struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273{
Christoph Lameter488514d2008-04-28 02:12:05 -07003274 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275 if (ret) {
Christoph Lametera35afb82007-05-16 22:10:57 -07003276 INIT_LIST_HEAD(&ret->b_assoc_buffers);
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003277 get_cpu_var(bh_accounting).nr++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 recalc_bh_state();
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003279 put_cpu_var(bh_accounting);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 }
3281 return ret;
3282}
3283EXPORT_SYMBOL(alloc_buffer_head);
3284
3285void free_buffer_head(struct buffer_head *bh)
3286{
3287 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3288 kmem_cache_free(bh_cachep, bh);
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003289 get_cpu_var(bh_accounting).nr--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290 recalc_bh_state();
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003291 put_cpu_var(bh_accounting);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292}
3293EXPORT_SYMBOL(free_buffer_head);
3294
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295static void buffer_exit_cpu(int cpu)
3296{
3297 int i;
3298 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3299
3300 for (i = 0; i < BH_LRU_SIZE; i++) {
3301 brelse(b->bhs[i]);
3302 b->bhs[i] = NULL;
3303 }
Eric Dumazet8a143422006-03-24 03:18:10 -08003304 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3305 per_cpu(bh_accounting, cpu).nr = 0;
3306 put_cpu_var(bh_accounting);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307}
3308
3309static int buffer_cpu_notify(struct notifier_block *self,
3310 unsigned long action, void *hcpu)
3311{
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003312 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313 buffer_exit_cpu((unsigned long)hcpu);
3314 return NOTIFY_OK;
3315}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003317/**
Randy Dunlapa6b91912008-03-19 17:01:00 -07003318 * bh_uptodate_or_lock - Test whether the buffer is uptodate
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003319 * @bh: struct buffer_head
3320 *
3321 * Returns 1 if the buffer is up-to-date; otherwise returns 0
3322 * with the buffer locked.
3323 */
3324int bh_uptodate_or_lock(struct buffer_head *bh)
3325{
3326 if (!buffer_uptodate(bh)) {
3327 lock_buffer(bh);
3328 if (!buffer_uptodate(bh))
3329 return 0;
3330 unlock_buffer(bh);
3331 }
3332 return 1;
3333}
3334EXPORT_SYMBOL(bh_uptodate_or_lock);
3335
3336/**
Randy Dunlapa6b91912008-03-19 17:01:00 -07003337 * bh_submit_read - Submit a locked buffer for reading
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003338 * @bh: struct buffer_head
3339 *
3340 * Returns zero on success and -EIO on error.
3341 */
3342int bh_submit_read(struct buffer_head *bh)
3343{
3344 BUG_ON(!buffer_locked(bh));
3345
3346 if (buffer_uptodate(bh)) {
3347 unlock_buffer(bh);
3348 return 0;
3349 }
3350
3351 get_bh(bh);
3352 bh->b_end_io = end_buffer_read_sync;
3353 submit_bh(READ, bh);
3354 wait_on_buffer(bh);
3355 if (buffer_uptodate(bh))
3356 return 0;
3357 return -EIO;
3358}
3359EXPORT_SYMBOL(bh_submit_read);
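
/*
 * Sketch of how the two helpers above pair up (the wrapper name is
 * hypothetical): skip both the lock and the I/O when the buffer is
 * already up to date, otherwise read it in synchronously.
 */
static int example_read_if_stale(struct buffer_head *bh)
{
        if (bh_uptodate_or_lock(bh))
                return 0;               /* up to date; buffer left unlocked */
        return bh_submit_read(bh);      /* bh is locked; submits and waits */
}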
3360
Christoph Lameterb98938c2008-02-04 22:28:36 -08003361static void
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003362init_buffer_head(void *data)
Christoph Lameterb98938c2008-02-04 22:28:36 -08003363{
3364 struct buffer_head *bh = data;
3365
3366 memset(bh, 0, sizeof(*bh));
3367 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3368}
3369
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370void __init buffer_init(void)
3371{
3372 int nrpages;
3373
Christoph Lameterb98938c2008-02-04 22:28:36 -08003374 bh_cachep = kmem_cache_create("buffer_head",
3375 sizeof(struct buffer_head), 0,
3376 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3377 SLAB_MEM_SPREAD),
3378 init_buffer_head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379
3380 /*
3381 * Limit the bh occupancy to 10% of ZONE_NORMAL
3382 */
3383 nrpages = (nr_free_buffer_pages() * 10) / 100;
3384 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3385 hotcpu_notifier(buffer_cpu_notify, 0);
3386}
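
/*
 * Worked example with assumed numbers: with 4KiB pages and a struct
 * buffer_head of roughly 100 bytes, PAGE_SIZE / sizeof(struct
 * buffer_head) is about 40.  A machine with 1,000,000 free buffer pages
 * then gets max_buffer_heads ~= 100,000 * 40 = 4,000,000 before
 * buffer_heads_over_limit trips and writeback starts stripping buffers.
 */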