/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required by older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

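/*
 * Editorial example, not part of the original file: the canonical
 * lock/unlock pairing around a buffer's contents.  The wrapper function
 * is hypothetical; lock_buffer() (which falls back to __lock_buffer()
 * above when contended) and unlock_buffer() are the real APIs.
 */
static void __maybe_unused example_buffer_locked_update(struct buffer_head *bh)
{
	lock_buffer(bh);	/* may sleep in __wait_on_buffer() */
	/* ... examine or modify bh->b_data with BH_Lock held ... */
	unlock_buffer(bh);	/* clears BH_Lock and wakes waiters */
}
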
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

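/*
 * Editorial sketch of how end_buffer_write_sync() is typically consumed;
 * it mirrors the sync_dirty_buffer() pattern.  The wrapper itself is
 * hypothetical and not part of the original file.
 */
static int __maybe_unused example_write_buffer_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;	/* unlocks, drops ref */
		submit_bh(WRITE, bh);
		wait_on_buffer(bh);
	} else {
		unlock_buffer(bh);
	}
	return buffer_uptodate(bh) ? 0 : -EIO;
}
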
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

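/*
 * Worked example (editorial): with 4K pages (PAGE_CACHE_SHIFT == 12) and
 * 1K blocks (i_blkbits == 10), each page covers 1 << (12 - 10) == 4
 * blocks, so __find_get_block_slow() looks for block 4000 in pagecache
 * page index 4000 >> 2 == 1000 and then walks that page's buffer ring.
 */
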
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted; thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk). Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty
   buffers.

   These are two special cases. Normal usage implies the device driver
   issuing a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_pdflush(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write_endio(struct buffer_head *bh,
				   bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

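/*
 * Editorial sketch (hypothetical caller) of the O_SYNC pattern described
 * above: queue the writes as the buffers are dirtied, then make a single
 * osync pass to wait on whatever was submitted:
 *
 *	ll_rw_block(WRITE, nr, bhs);		(queue the buffer writes)
 *	err = osync_buffers_list(lock, list);	(wait for them to complete)
 *
 * Buffers dirtied after the ll_rw_block() call are deliberately left
 * unflushed by the osync pass.
 */
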
void do_thaw_all(struct work_struct *work)
{
	struct super_block *sb;
	char b[BDEVNAME_SIZE];

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
			printk(KERN_WARNING "Emergency Thaw on %s\n",
			       bdevname(sb->s_bdev, b));
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

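/*
 * Editorial example: a minimal ->fsync() helper for a buffer-backed
 * filesystem can be built directly on sync_mapping_buffers().  The
 * function below is hypothetical, not part of the original file.
 */
static int __maybe_unused example_fsync(struct inode *inode)
{
	/* write out & wait upon this inode's associated metadata buffers */
	return sync_mapping_buffers(inode->i_mapping);
}
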
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

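/*
 * Editorial sketch: a filesystem that has just modified a metadata block
 * (say, an ext2 indirect block) belonging to an inode would call
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * so that a later sync_mapping_buffers(inode->i_mapping) finds the buffer
 * on ->private_list and writes it back before fsync() returns.
 */
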
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping, *prev_mapping = NULL;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				if (prev_mapping && prev_mapping != mapping)
					blk_run_address_space(prev_mapping);
				prev_mapping = mapping;

				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

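/*
 * Worked example (editorial): for size == 512 on a 4K page, the loop in
 * grow_buffers() yields sizebits == 3 (512 << 3 == PAGE_SIZE), so the
 * pagecache index is block >> 3 and the first block backed by that page
 * is index << 3.
 */
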
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		if (!TestSetPageDirty(page))
			__set_page_dirty(page, page_mapping(page), 0);
	}
}

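/*
 * Editorial example of the usual metadata-update sequence ending in
 * mark_buffer_dirty().  The wrapper is hypothetical; the memcpy() over
 * bh->b_data stands in for a real filesystem's modification.
 */
static void __maybe_unused example_update_metadata(struct buffer_head *bh,
						   const void *src, size_t len)
{
	lock_buffer(bh);
	memcpy(bh->b_data, src, len);	/* modify the buffer contents */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* queue it for writeback */
}
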
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size.  The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns buffer head that contains it.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);

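/*
 * Editorial example: reading one block synchronously with the API above.
 * The helper is hypothetical; __bread() and brelse() are the real entry
 * points.
 */
static int __maybe_unused example_read_block(struct block_device *bdev,
					     sector_t block, unsigned size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* ... bh->b_data now holds the block's contents ... */
	brelse(bh);
	return 0;
}
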
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

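/*
 * Editorial sketch (hypothetical caller): read paths such as
 * block_read_full_page() attach buffers to a page like this before
 * mapping them:
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, blocksize, 0);
 *	head = page_buffers(page);
 *
 * Any dirty/uptodate state already on the page is propagated to the
 * new buffers, as handled above.
 */
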
1556/*
1557 * We are taking a block for data and we don't want any output from any
 1558 * buffer-cache aliases from the moment this function returns until
 1559 * something explicitly marks the buffer dirty (hopefully that will
 1560 * not happen until we free that block ;-)
 1561 * We don't even need to mark it not-uptodate - nobody can expect
 1562 * anything from a newly allocated buffer anyway. We used to use
 1563 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 1564 * don't want to mark the alias unmapped, for example - it would confuse
 1565 * anyone who might pick it up with bread() afterwards...
1566 *
1567 * Also.. Note that bforget() doesn't lock the buffer. So there can
1568 * be writeout I/O going on against recently-freed buffers. We don't
1569 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1570 * only if we really need to. That happens here.
1571 */
1572void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1573{
1574 struct buffer_head *old_bh;
1575
1576 might_sleep();
1577
Coywolf Qi Hunt385fd4c2005-11-07 00:59:39 -08001578 old_bh = __find_get_block_slow(bdev, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 if (old_bh) {
1580 clear_buffer_dirty(old_bh);
1581 wait_on_buffer(old_bh);
1582 clear_buffer_req(old_bh);
1583 __brelse(old_bh);
1584 }
1585}
1586EXPORT_SYMBOL(unmap_underlying_metadata);
1587
1588/*
1589 * NOTE! All mapped/uptodate combinations are valid:
1590 *
1591 * Mapped Uptodate Meaning
1592 *
1593 * No No "unknown" - must do get_block()
1594 * No Yes "hole" - zero-filled
1595 * Yes No "allocated" - allocated on disk, not read in
1596 * Yes Yes "valid" - allocated and up-to-date in memory.
1597 *
1598 * "Dirty" is valid only with the last case (mapped+uptodate).
1599 */
1600
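/*
 * Illustrative sketch (not part of the original file): the state table
 * above, spelled out as code.  Note that buffer_dirty() is only
 * meaningful in the mapped+uptodate case.
 */
static const char *bh_state_name(struct buffer_head *bh)
{
        if (!buffer_mapped(bh))
                return buffer_uptodate(bh) ? "hole" : "unknown";
        return buffer_uptodate(bh) ? "valid" : "allocated";
}
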
1601/*
1602 * While block_write_full_page is writing back the dirty buffers under
1603 * the page lock, whoever dirtied the buffers may decide to clean them
1604 * again at any time. We handle that by only looking at the buffer
1605 * state inside lock_buffer().
1606 *
1607 * If block_write_full_page() is called for regular writeback
1608 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1609 * locked buffer. This only can happen if someone has written the buffer
1610 * directly, with submit_bh(). At the address_space level PageWriteback
1611 * prevents this contention from occurring.
Theodore Ts'o6e34eedd2009-04-07 18:12:43 -04001612 *
1613 * If block_write_full_page() is called with wbc->sync_mode ==
1614 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1615 * causes the writes to be flagged as synchronous writes, but the
1616 * block device queue will NOT be unplugged, since usually many pages
 1617 * will be pushed out before the higher-level caller actually
1618 * waits for the writes to be completed. The various wait functions,
1619 * such as wait_on_writeback_range() will ultimately call sync_page()
1620 * which will ultimately call blk_run_backing_dev(), which will end up
1621 * unplugging the device queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 */
1623static int __block_write_full_page(struct inode *inode, struct page *page,
Chris Mason35c80d52009-04-15 13:22:38 -04001624 get_block_t *get_block, struct writeback_control *wbc,
1625 bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626{
1627 int err;
1628 sector_t block;
1629 sector_t last_block;
Andrew Mortonf0fbd5f2005-05-05 16:15:48 -07001630 struct buffer_head *bh, *head;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001631 const unsigned blocksize = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 int nr_underway = 0;
Theodore Ts'o6e34eedd2009-04-07 18:12:43 -04001633 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1634 WRITE_SYNC_PLUG : WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635
1636 BUG_ON(!PageLocked(page));
1637
1638 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1639
1640 if (!page_has_buffers(page)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001641 create_empty_buffers(page, blocksize,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 (1 << BH_Dirty)|(1 << BH_Uptodate));
1643 }
1644
1645 /*
1646 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1647 * here, and the (potentially unmapped) buffers may become dirty at
1648 * any time. If a buffer becomes dirty here after we've inspected it
1649 * then we just miss that fact, and the page stays dirty.
1650 *
1651 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1652 * handle that here by just cleaning them.
1653 */
1654
Andrew Morton54b21a72006-01-08 01:03:05 -08001655 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 head = page_buffers(page);
1657 bh = head;
1658
1659 /*
1660 * Get all the dirty buffers mapped to disk addresses and
1661 * handle any aliases from the underlying blockdev's mapping.
1662 */
1663 do {
1664 if (block > last_block) {
1665 /*
1666 * mapped buffers outside i_size will occur, because
1667 * this page can be outside i_size when there is a
1668 * truncate in progress.
1669 */
1670 /*
1671 * The buffer was zeroed by block_write_full_page()
1672 */
1673 clear_buffer_dirty(bh);
1674 set_buffer_uptodate(bh);
Alex Tomas29a814d2008-07-11 19:27:31 -04001675 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1676 buffer_dirty(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001677 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 err = get_block(inode, block, bh, 1);
1679 if (err)
1680 goto recover;
Alex Tomas29a814d2008-07-11 19:27:31 -04001681 clear_buffer_delay(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 if (buffer_new(bh)) {
1683 /* blockdev mappings never come here */
1684 clear_buffer_new(bh);
1685 unmap_underlying_metadata(bh->b_bdev,
1686 bh->b_blocknr);
1687 }
1688 }
1689 bh = bh->b_this_page;
1690 block++;
1691 } while (bh != head);
1692
1693 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 if (!buffer_mapped(bh))
1695 continue;
1696 /*
1697 * If it's a fully non-blocking write attempt and we cannot
1698 * lock the buffer then redirty the page. Note that this can
1699 * potentially cause a busy-wait loop from pdflush and kswapd
1700 * activity, but those code paths have their own higher-level
1701 * throttling.
1702 */
1703 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1704 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02001705 } else if (!trylock_buffer(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 redirty_page_for_writepage(wbc, page);
1707 continue;
1708 }
1709 if (test_clear_buffer_dirty(bh)) {
Chris Mason35c80d52009-04-15 13:22:38 -04001710 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 } else {
1712 unlock_buffer(bh);
1713 }
1714 } while ((bh = bh->b_this_page) != head);
1715
1716 /*
1717 * The page and its buffers are protected by PageWriteback(), so we can
1718 * drop the bh refcounts early.
1719 */
1720 BUG_ON(PageWriteback(page));
1721 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722
1723 do {
1724 struct buffer_head *next = bh->b_this_page;
1725 if (buffer_async_write(bh)) {
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001726 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 nr_underway++;
1728 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 bh = next;
1730 } while (bh != head);
Andrew Morton05937ba2005-05-05 16:15:47 -07001731 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732
1733 err = 0;
1734done:
1735 if (nr_underway == 0) {
1736 /*
1737 * The page was marked dirty, but the buffers were
1738 * clean. Someone wrote them back by hand with
1739 * ll_rw_block/submit_bh. A rare case.
1740 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 end_page_writeback(page);
Nick Piggin3d67f2d2007-05-06 14:49:05 -07001742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 /*
1744 * The page and buffer_heads can be released at any time from
1745 * here on.
1746 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 }
1748 return err;
1749
1750recover:
1751 /*
1752 * ENOSPC, or some other error. We may already have added some
1753 * blocks to the file, so we need to write these out to avoid
1754 * exposing stale data.
1755 * The page is currently locked and not marked for writeback
1756 */
1757 bh = head;
1758 /* Recovery: lock and submit the mapped buffers */
1759 do {
Alex Tomas29a814d2008-07-11 19:27:31 -04001760 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1761 !buffer_delay(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 lock_buffer(bh);
Chris Mason35c80d52009-04-15 13:22:38 -04001763 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 } else {
1765 /*
1766 * The buffer may have been set dirty during
1767 * attachment to a dirty page.
1768 */
1769 clear_buffer_dirty(bh);
1770 }
1771 } while ((bh = bh->b_this_page) != head);
1772 SetPageError(page);
1773 BUG_ON(PageWriteback(page));
Andrew Morton7e4c3692007-05-08 00:23:27 -07001774 mapping_set_error(page->mapping, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 do {
1777 struct buffer_head *next = bh->b_this_page;
1778 if (buffer_async_write(bh)) {
1779 clear_buffer_dirty(bh);
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001780 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 nr_underway++;
1782 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 bh = next;
1784 } while (bh != head);
Nick Pigginffda9d32007-02-20 13:57:54 -08001785 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 goto done;
1787}
1788
Nick Pigginafddba42007-10-16 01:25:01 -07001789/*
1790 * If a page has any new buffers, zero them out here, and mark them uptodate
1791 * and dirty so they'll be written out (in order to prevent uninitialised
1792 * block data from leaking). And clear the new bit.
1793 */
1794void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1795{
1796 unsigned int block_start, block_end;
1797 struct buffer_head *head, *bh;
1798
1799 BUG_ON(!PageLocked(page));
1800 if (!page_has_buffers(page))
1801 return;
1802
1803 bh = head = page_buffers(page);
1804 block_start = 0;
1805 do {
1806 block_end = block_start + bh->b_size;
1807
1808 if (buffer_new(bh)) {
1809 if (block_end > from && block_start < to) {
1810 if (!PageUptodate(page)) {
1811 unsigned start, size;
1812
1813 start = max(from, block_start);
1814 size = min(to, block_end) - start;
1815
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001816 zero_user(page, start, size);
Nick Pigginafddba42007-10-16 01:25:01 -07001817 set_buffer_uptodate(bh);
1818 }
1819
1820 clear_buffer_new(bh);
1821 mark_buffer_dirty(bh);
1822 }
1823 }
1824
1825 block_start = block_end;
1826 bh = bh->b_this_page;
1827 } while (bh != head);
1828}
1829EXPORT_SYMBOL(page_zero_new_buffers);
1830
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831static int __block_prepare_write(struct inode *inode, struct page *page,
1832 unsigned from, unsigned to, get_block_t *get_block)
1833{
1834 unsigned block_start, block_end;
1835 sector_t block;
1836 int err = 0;
1837 unsigned blocksize, bbits;
1838 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1839
1840 BUG_ON(!PageLocked(page));
1841 BUG_ON(from > PAGE_CACHE_SIZE);
1842 BUG_ON(to > PAGE_CACHE_SIZE);
1843 BUG_ON(from > to);
1844
1845 blocksize = 1 << inode->i_blkbits;
1846 if (!page_has_buffers(page))
1847 create_empty_buffers(page, blocksize, 0);
1848 head = page_buffers(page);
1849
1850 bbits = inode->i_blkbits;
1851 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1852
1853 for(bh = head, block_start = 0; bh != head || !block_start;
1854 block++, block_start=block_end, bh = bh->b_this_page) {
1855 block_end = block_start + blocksize;
1856 if (block_end <= from || block_start >= to) {
1857 if (PageUptodate(page)) {
1858 if (!buffer_uptodate(bh))
1859 set_buffer_uptodate(bh);
1860 }
1861 continue;
1862 }
1863 if (buffer_new(bh))
1864 clear_buffer_new(bh);
1865 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001866 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 err = get_block(inode, block, bh, 1);
1868 if (err)
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001869 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 if (buffer_new(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 unmap_underlying_metadata(bh->b_bdev,
1872 bh->b_blocknr);
1873 if (PageUptodate(page)) {
Nick Piggin637aff42007-10-16 01:25:00 -07001874 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 set_buffer_uptodate(bh);
Nick Piggin637aff42007-10-16 01:25:00 -07001876 mark_buffer_dirty(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 continue;
1878 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001879 if (block_end > to || block_start < from)
1880 zero_user_segments(page,
1881 to, block_end,
1882 block_start, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 continue;
1884 }
1885 }
1886 if (PageUptodate(page)) {
1887 if (!buffer_uptodate(bh))
1888 set_buffer_uptodate(bh);
1889 continue;
1890 }
1891 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
David Chinner33a266d2007-02-12 00:51:41 -08001892 !buffer_unwritten(bh) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 (block_start < from || block_end > to)) {
1894 ll_rw_block(READ, 1, &bh);
1895 *wait_bh++=bh;
1896 }
1897 }
1898 /*
1899 * If we issued read requests - let them complete.
1900 */
1901 while(wait_bh > wait) {
1902 wait_on_buffer(*--wait_bh);
1903 if (!buffer_uptodate(*wait_bh))
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001904 err = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 }
Nick Pigginafddba42007-10-16 01:25:01 -07001906 if (unlikely(err))
1907 page_zero_new_buffers(page, from, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 return err;
1909}
1910
1911static int __block_commit_write(struct inode *inode, struct page *page,
1912 unsigned from, unsigned to)
1913{
1914 unsigned block_start, block_end;
1915 int partial = 0;
1916 unsigned blocksize;
1917 struct buffer_head *bh, *head;
1918
1919 blocksize = 1 << inode->i_blkbits;
1920
1921 for(bh = head = page_buffers(page), block_start = 0;
1922 bh != head || !block_start;
1923 block_start=block_end, bh = bh->b_this_page) {
1924 block_end = block_start + blocksize;
1925 if (block_end <= from || block_start >= to) {
1926 if (!buffer_uptodate(bh))
1927 partial = 1;
1928 } else {
1929 set_buffer_uptodate(bh);
1930 mark_buffer_dirty(bh);
1931 }
Nick Pigginafddba42007-10-16 01:25:01 -07001932 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 }
1934
1935 /*
1936 * If this is a partial write which happened to make all buffers
1937 * uptodate then we can optimize away a bogus readpage() for
1938 * the next read(). Here we 'discover' whether the page went
1939 * uptodate as a result of this (potentially partial) write.
1940 */
1941 if (!partial)
1942 SetPageUptodate(page);
1943 return 0;
1944}
1945
1946/*
Nick Pigginafddba42007-10-16 01:25:01 -07001947 * block_write_begin takes care of the basic task of block allocation and
1948 * bringing partial write blocks uptodate first.
1949 *
1950 * If *pagep is not NULL, then block_write_begin uses the locked page
1951 * at *pagep rather than allocating its own. In this case, the page will
1952 * not be unlocked or deallocated on failure.
1953 */
1954int block_write_begin(struct file *file, struct address_space *mapping,
1955 loff_t pos, unsigned len, unsigned flags,
1956 struct page **pagep, void **fsdata,
1957 get_block_t *get_block)
1958{
1959 struct inode *inode = mapping->host;
1960 int status = 0;
1961 struct page *page;
1962 pgoff_t index;
1963 unsigned start, end;
1964 int ownpage = 0;
1965
1966 index = pos >> PAGE_CACHE_SHIFT;
1967 start = pos & (PAGE_CACHE_SIZE - 1);
1968 end = start + len;
1969
1970 page = *pagep;
1971 if (page == NULL) {
1972 ownpage = 1;
Nick Piggin54566b22009-01-04 12:00:53 -08001973 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Pigginafddba42007-10-16 01:25:01 -07001974 if (!page) {
1975 status = -ENOMEM;
1976 goto out;
1977 }
1978 *pagep = page;
1979 } else
1980 BUG_ON(!PageLocked(page));
1981
1982 status = __block_prepare_write(inode, page, start, end, get_block);
1983 if (unlikely(status)) {
1984 ClearPageUptodate(page);
1985
1986 if (ownpage) {
1987 unlock_page(page);
1988 page_cache_release(page);
1989 *pagep = NULL;
1990
1991 /*
1992 * prepare_write() may have instantiated a few blocks
1993 * outside i_size. Trim these off again. Don't need
1994 * i_size_read because we hold i_mutex.
1995 */
1996 if (pos + len > inode->i_size)
1997 vmtruncate(inode, inode->i_size);
1998 }
Nick Pigginafddba42007-10-16 01:25:01 -07001999 }
2000
2001out:
2002 return status;
2003}
2004EXPORT_SYMBOL(block_write_begin);
2005
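/*
 * Illustrative sketch (not part of the original file): a typical
 * ->write_begin is a thin wrapper that supplies the filesystem's own
 * get_block callback.  foo_get_block is an assumed fs-provided mapper.
 */
static int foo_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        *pagep = NULL;  /* ask block_write_begin to grab the page itself */
        return block_write_begin(file, mapping, pos, len, flags,
                                        pagep, fsdata, foo_get_block);
}
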
2006int block_write_end(struct file *file, struct address_space *mapping,
2007 loff_t pos, unsigned len, unsigned copied,
2008 struct page *page, void *fsdata)
2009{
2010 struct inode *inode = mapping->host;
2011 unsigned start;
2012
2013 start = pos & (PAGE_CACHE_SIZE - 1);
2014
2015 if (unlikely(copied < len)) {
2016 /*
2017 * The buffers that were written will now be uptodate, so we
2018 * don't have to worry about a readpage reading them and
2019 * overwriting a partial write. However if we have encountered
2020 * a short write and only partially written into a buffer, it
2021 * will not be marked uptodate, so a readpage might come in and
2022 * destroy our partial write.
2023 *
2024 * Do the simplest thing, and just treat any short write to a
2025 * non uptodate page as a zero-length write, and force the
2026 * caller to redo the whole thing.
2027 */
2028 if (!PageUptodate(page))
2029 copied = 0;
2030
2031 page_zero_new_buffers(page, start+copied, start+len);
2032 }
2033 flush_dcache_page(page);
2034
2035 /* This could be a short (even 0-length) commit */
2036 __block_commit_write(inode, page, start, start+copied);
2037
2038 return copied;
2039}
2040EXPORT_SYMBOL(block_write_end);
2041
2042int generic_write_end(struct file *file, struct address_space *mapping,
2043 loff_t pos, unsigned len, unsigned copied,
2044 struct page *page, void *fsdata)
2045{
2046 struct inode *inode = mapping->host;
Jan Karac7d206b2008-07-11 19:27:31 -04002047 int i_size_changed = 0;
Nick Pigginafddba42007-10-16 01:25:01 -07002048
2049 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2050
2051 /*
2052 * No need to use i_size_read() here, the i_size
2053 * cannot change under us because we hold i_mutex.
2054 *
2055 * But it's important to update i_size while still holding page lock:
2056 * page writeout could otherwise come in and zero beyond i_size.
2057 */
2058 if (pos+copied > inode->i_size) {
2059 i_size_write(inode, pos+copied);
Jan Karac7d206b2008-07-11 19:27:31 -04002060 i_size_changed = 1;
Nick Pigginafddba42007-10-16 01:25:01 -07002061 }
2062
2063 unlock_page(page);
2064 page_cache_release(page);
2065
Jan Karac7d206b2008-07-11 19:27:31 -04002066 /*
2067 * Don't mark the inode dirty under page lock. First, it unnecessarily
2068 * makes the holding time of page lock longer. Second, it forces lock
2069 * ordering of page lock and transaction start for journaling
2070 * filesystems.
2071 */
2072 if (i_size_changed)
2073 mark_inode_dirty(inode);
2074
Nick Pigginafddba42007-10-16 01:25:01 -07002075 return copied;
2076}
2077EXPORT_SYMBOL(generic_write_end);
2078
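/*
 * Illustrative sketch (not part of the original file): how the helpers
 * above are typically assembled into an address_space_operations table
 * for a simple blocksize-granular filesystem.  All foo_* names are
 * hypothetical.
 */
static const struct address_space_operations foo_aops = {
        .readpage               = foo_readpage,
        .writepage              = foo_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = foo_write_begin,
        .write_end              = generic_write_end,
        .bmap                   = foo_bmap,
        .is_partially_uptodate  = block_is_partially_uptodate,
};
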
2079/*
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002080 * block_is_partially_uptodate checks whether buffers within a page are
2081 * uptodate or not.
2082 *
2083 * Returns true if all buffers which correspond to a file portion
2084 * we want to read are uptodate.
2085 */
2086int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2087 unsigned long from)
2088{
2089 struct inode *inode = page->mapping->host;
2090 unsigned block_start, block_end, blocksize;
2091 unsigned to;
2092 struct buffer_head *bh, *head;
2093 int ret = 1;
2094
2095 if (!page_has_buffers(page))
2096 return 0;
2097
2098 blocksize = 1 << inode->i_blkbits;
2099 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2100 to = from + to;
2101 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2102 return 0;
2103
2104 head = page_buffers(page);
2105 bh = head;
2106 block_start = 0;
2107 do {
2108 block_end = block_start + blocksize;
2109 if (block_end > from && block_start < to) {
2110 if (!buffer_uptodate(bh)) {
2111 ret = 0;
2112 break;
2113 }
2114 if (block_end >= to)
2115 break;
2116 }
2117 block_start = block_end;
2118 bh = bh->b_this_page;
2119 } while (bh != head);
2120
2121 return ret;
2122}
2123EXPORT_SYMBOL(block_is_partially_uptodate);
2124
2125/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 * Generic "read page" function for block devices that have the normal
2127 * get_block functionality. This is most of the block device filesystems.
2128 * Reads the page asynchronously --- the unlock_buffer() and
2129 * set/clear_buffer_uptodate() functions propagate buffer state into the
2130 * page struct once IO has completed.
2131 */
2132int block_read_full_page(struct page *page, get_block_t *get_block)
2133{
2134 struct inode *inode = page->mapping->host;
2135 sector_t iblock, lblock;
2136 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2137 unsigned int blocksize;
2138 int nr, i;
2139 int fully_mapped = 1;
2140
Matt Mackallcd7619d2005-05-01 08:59:01 -07002141 BUG_ON(!PageLocked(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 blocksize = 1 << inode->i_blkbits;
2143 if (!page_has_buffers(page))
2144 create_empty_buffers(page, blocksize, 0);
2145 head = page_buffers(page);
2146
2147 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2148 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2149 bh = head;
2150 nr = 0;
2151 i = 0;
2152
2153 do {
2154 if (buffer_uptodate(bh))
2155 continue;
2156
2157 if (!buffer_mapped(bh)) {
Andrew Mortonc64610b2005-05-16 21:53:49 -07002158 int err = 0;
2159
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 fully_mapped = 0;
2161 if (iblock < lblock) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002162 WARN_ON(bh->b_size != blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002163 err = get_block(inode, iblock, bh, 0);
2164 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 SetPageError(page);
2166 }
2167 if (!buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002168 zero_user(page, i * blocksize, blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002169 if (!err)
2170 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 continue;
2172 }
2173 /*
2174 * get_block() might have updated the buffer
2175 * synchronously
2176 */
2177 if (buffer_uptodate(bh))
2178 continue;
2179 }
2180 arr[nr++] = bh;
2181 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2182
2183 if (fully_mapped)
2184 SetPageMappedToDisk(page);
2185
2186 if (!nr) {
2187 /*
2188 * All buffers are uptodate - we can set the page uptodate
2189 * as well. But not if get_block() returned an error.
2190 */
2191 if (!PageError(page))
2192 SetPageUptodate(page);
2193 unlock_page(page);
2194 return 0;
2195 }
2196
2197 /* Stage two: lock the buffers */
2198 for (i = 0; i < nr; i++) {
2199 bh = arr[i];
2200 lock_buffer(bh);
2201 mark_buffer_async_read(bh);
2202 }
2203
2204 /*
2205 * Stage 3: start the IO. Check for uptodateness
2206 * inside the buffer lock in case another process reading
2207 * the underlying blockdev brought it uptodate (the sct fix).
2208 */
2209 for (i = 0; i < nr; i++) {
2210 bh = arr[i];
2211 if (buffer_uptodate(bh))
2212 end_buffer_async_read(bh, 1);
2213 else
2214 submit_bh(READ, bh);
2215 }
2216 return 0;
2217}
2218
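/*
 * Illustrative sketch (not part of the original file): the usual
 * ->readpage for a block_read_full_page() user; foo_get_block is an
 * assumed fs-provided block mapper.
 */
static int foo_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, foo_get_block);
}
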
2219/* utility function for filesystems that need to do work on expanding
Nick Piggin89e10782007-10-16 01:25:07 -07002220 * truncates. Uses filesystem pagecache writes to allow the filesystem to
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 * deal with the hole.
2222 */
Nick Piggin89e10782007-10-16 01:25:07 -07002223int generic_cont_expand_simple(struct inode *inode, loff_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224{
2225 struct address_space *mapping = inode->i_mapping;
2226 struct page *page;
Nick Piggin89e10782007-10-16 01:25:07 -07002227 void *fsdata;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002228 unsigned long limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 int err;
2230
2231 err = -EFBIG;
2232 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2233 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2234 send_sig(SIGXFSZ, current, 0);
2235 goto out;
2236 }
2237 if (size > inode->i_sb->s_maxbytes)
2238 goto out;
2239
Nick Piggin89e10782007-10-16 01:25:07 -07002240 err = pagecache_write_begin(NULL, mapping, size, 0,
2241 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2242 &page, &fsdata);
2243 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 goto out;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002245
Nick Piggin89e10782007-10-16 01:25:07 -07002246 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2247 BUG_ON(err > 0);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002248
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249out:
2250 return err;
2251}
2252
Adrian Bunkf1e3af72008-04-29 00:59:01 -07002253static int cont_expand_zero(struct file *file, struct address_space *mapping,
2254 loff_t pos, loff_t *bytes)
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002255{
Nick Piggin89e10782007-10-16 01:25:07 -07002256 struct inode *inode = mapping->host;
2257 unsigned blocksize = 1 << inode->i_blkbits;
2258 struct page *page;
2259 void *fsdata;
2260 pgoff_t index, curidx;
2261 loff_t curpos;
2262 unsigned zerofrom, offset, len;
2263 int err = 0;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002264
Nick Piggin89e10782007-10-16 01:25:07 -07002265 index = pos >> PAGE_CACHE_SHIFT;
2266 offset = pos & ~PAGE_CACHE_MASK;
2267
2268 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2269 zerofrom = curpos & ~PAGE_CACHE_MASK;
2270 if (zerofrom & (blocksize-1)) {
2271 *bytes |= (blocksize-1);
2272 (*bytes)++;
2273 }
2274 len = PAGE_CACHE_SIZE - zerofrom;
2275
2276 err = pagecache_write_begin(file, mapping, curpos, len,
2277 AOP_FLAG_UNINTERRUPTIBLE,
2278 &page, &fsdata);
2279 if (err)
2280 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002281 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002282 err = pagecache_write_end(file, mapping, curpos, len, len,
2283 page, fsdata);
2284 if (err < 0)
2285 goto out;
2286 BUG_ON(err != len);
2287 err = 0;
OGAWA Hirofumi061e9742008-04-28 02:16:28 -07002288
2289 balance_dirty_pages_ratelimited(mapping);
Nick Piggin89e10782007-10-16 01:25:07 -07002290 }
2291
2292 /* page covers the boundary, find the boundary offset */
2293 if (index == curidx) {
2294 zerofrom = curpos & ~PAGE_CACHE_MASK;
2295 /* if we will expand the thing last block will be filled */
2296 if (offset <= zerofrom) {
2297 goto out;
2298 }
2299 if (zerofrom & (blocksize-1)) {
2300 *bytes |= (blocksize-1);
2301 (*bytes)++;
2302 }
2303 len = offset - zerofrom;
2304
2305 err = pagecache_write_begin(file, mapping, curpos, len,
2306 AOP_FLAG_UNINTERRUPTIBLE,
2307 &page, &fsdata);
2308 if (err)
2309 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002310 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002311 err = pagecache_write_end(file, mapping, curpos, len, len,
2312 page, fsdata);
2313 if (err < 0)
2314 goto out;
2315 BUG_ON(err != len);
2316 err = 0;
2317 }
2318out:
2319 return err;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002320}
2321
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322/*
2323 * For moronic filesystems that do not allow holes in file.
2324 * We may have to extend the file.
2325 */
Nick Piggin89e10782007-10-16 01:25:07 -07002326int cont_write_begin(struct file *file, struct address_space *mapping,
2327 loff_t pos, unsigned len, unsigned flags,
2328 struct page **pagep, void **fsdata,
2329 get_block_t *get_block, loff_t *bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332 unsigned blocksize = 1 << inode->i_blkbits;
Nick Piggin89e10782007-10-16 01:25:07 -07002333 unsigned zerofrom;
2334 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335
Nick Piggin89e10782007-10-16 01:25:07 -07002336 err = cont_expand_zero(file, mapping, pos, bytes);
2337 if (err)
2338 goto out;
2339
2340 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2341 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2342 *bytes |= (blocksize-1);
2343 (*bytes)++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 }
2345
Nick Piggin89e10782007-10-16 01:25:07 -07002346 *pagep = NULL;
2347 err = block_write_begin(file, mapping, pos, len,
2348 flags, pagep, fsdata, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349out:
Nick Piggin89e10782007-10-16 01:25:07 -07002350 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351}
2352
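/*
 * Illustrative sketch (not part of the original file): a hole-less
 * filesystem hands cont_write_begin() a pointer to its private
 * "zeroed up to here" watermark.  FOO_I() and mmu_private are
 * hypothetical stand-ins for the per-inode state such filesystems keep.
 */
static int foo_cont_write_begin(struct file *file,
                struct address_space *mapping, loff_t pos, unsigned len,
                unsigned flags, struct page **pagep, void **fsdata)
{
        return cont_write_begin(file, mapping, pos, len, flags,
                        pagep, fsdata, foo_get_block,
                        &FOO_I(mapping->host)->mmu_private);
}
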
2353int block_prepare_write(struct page *page, unsigned from, unsigned to,
2354 get_block_t *get_block)
2355{
2356 struct inode *inode = page->mapping->host;
2357 int err = __block_prepare_write(inode, page, from, to, get_block);
2358 if (err)
2359 ClearPageUptodate(page);
2360 return err;
2361}
2362
2363int block_commit_write(struct page *page, unsigned from, unsigned to)
2364{
2365 struct inode *inode = page->mapping->host;
2366 __block_commit_write(inode,page,from,to);
2367 return 0;
2368}
2369
David Chinner54171692007-07-19 17:39:55 +10002370/*
2371 * block_page_mkwrite() is not allowed to change the file size as it gets
2372 * called from a page fault handler when a page is first dirtied. Hence we must
2373 * be careful to check for EOF conditions here. We set the page up correctly
2374 * for a written page which means we get ENOSPC checking when writing into
2375 * holes and correct delalloc and unwritten extent mapping on filesystems that
2376 * support these features.
2377 *
2378 * We are not allowed to take the i_mutex here so we have to play games to
2379 * protect against truncate races as the page could now be beyond EOF. Because
2380 * vmtruncate() writes the inode size before removing pages, once we have the
2381 * page lock we can determine safely if the page is beyond EOF. If it is not
2382 * beyond EOF, then the page is guaranteed safe against truncation until we
2383 * unlock the page.
2384 */
2385int
Nick Pigginc2ec1752009-03-31 15:23:21 -07002386block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
David Chinner54171692007-07-19 17:39:55 +10002387 get_block_t get_block)
2388{
Nick Pigginc2ec1752009-03-31 15:23:21 -07002389 struct page *page = vmf->page;
David Chinner54171692007-07-19 17:39:55 +10002390 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2391 unsigned long end;
2392 loff_t size;
Nick Piggin56a76f82009-03-31 15:23:23 -07002393 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
David Chinner54171692007-07-19 17:39:55 +10002394
2395 lock_page(page);
2396 size = i_size_read(inode);
2397 if ((page->mapping != inode->i_mapping) ||
Nick Piggin18336332007-07-20 00:31:45 -07002398 (page_offset(page) > size)) {
David Chinner54171692007-07-19 17:39:55 +10002399 /* page got truncated out from underneath us */
Nick Pigginb827e492009-04-30 15:08:16 -07002400 unlock_page(page);
2401 goto out;
David Chinner54171692007-07-19 17:39:55 +10002402 }
2403
2404 /* page is wholly or partially inside EOF */
2405 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2406 end = size & ~PAGE_CACHE_MASK;
2407 else
2408 end = PAGE_CACHE_SIZE;
2409
2410 ret = block_prepare_write(page, 0, end, get_block);
2411 if (!ret)
2412 ret = block_commit_write(page, 0, end);
2413
Nick Piggin56a76f82009-03-31 15:23:23 -07002414 if (unlikely(ret)) {
Nick Pigginb827e492009-04-30 15:08:16 -07002415 unlock_page(page);
Nick Piggin56a76f82009-03-31 15:23:23 -07002416 if (ret == -ENOMEM)
2417 ret = VM_FAULT_OOM;
2418 else /* -ENOSPC, -EIO, etc */
2419 ret = VM_FAULT_SIGBUS;
Nick Pigginb827e492009-04-30 15:08:16 -07002420 } else
2421 ret = VM_FAULT_LOCKED;
Nick Pigginc2ec1752009-03-31 15:23:21 -07002422
Nick Pigginb827e492009-04-30 15:08:16 -07002423out:
David Chinner54171692007-07-19 17:39:55 +10002424 return ret;
2425}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426
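/*
 * Illustrative sketch (not part of the original file): reaching
 * block_page_mkwrite() from a write fault.  The foo_* names are
 * hypothetical; ->fault can stay the generic filemap_fault.
 */
static int foo_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return block_page_mkwrite(vma, vmf, foo_get_block);
}

static struct vm_operations_struct foo_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = foo_page_mkwrite,
};
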
2427/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002428 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 * immediately, while under the page lock. So it needs a special end_io
2430 * handler which does not touch the bh after unlocking it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 */
2432static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2433{
Dmitry Monakhov68671f32007-10-16 01:24:47 -07002434 __end_buffer_read_notouch(bh, uptodate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435}
2436
2437/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002438 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2439 * the page (converting it to circular linked list and taking care of page
2440 * dirty races).
2441 */
2442static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2443{
2444 struct buffer_head *bh;
2445
2446 BUG_ON(!PageLocked(page));
2447
2448 spin_lock(&page->mapping->private_lock);
2449 bh = head;
2450 do {
2451 if (PageDirty(page))
2452 set_buffer_dirty(bh);
2453 if (!bh->b_this_page)
2454 bh->b_this_page = head;
2455 bh = bh->b_this_page;
2456 } while (bh != head);
2457 attach_page_buffers(page, head);
2458 spin_unlock(&page->mapping->private_lock);
2459}
2460
2461/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 * On entry, the page is not uptodate at all.
 2463 * On exit, the page is fully uptodate in the areas outside (from,to).
2464 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002465int nobh_write_begin(struct file *file, struct address_space *mapping,
2466 loff_t pos, unsigned len, unsigned flags,
2467 struct page **pagep, void **fsdata,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 get_block_t *get_block)
2469{
Nick Piggin03158cd2007-10-16 01:25:25 -07002470 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471 const unsigned blkbits = inode->i_blkbits;
2472 const unsigned blocksize = 1 << blkbits;
Nick Piggina4b06722007-10-16 01:24:48 -07002473 struct buffer_head *head, *bh;
Nick Piggin03158cd2007-10-16 01:25:25 -07002474 struct page *page;
2475 pgoff_t index;
2476 unsigned from, to;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 unsigned block_in_page;
Nick Piggina4b06722007-10-16 01:24:48 -07002478 unsigned block_start, block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 sector_t block_in_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 int nr_reads = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 int ret = 0;
2482 int is_mapped_to_disk = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
Nick Piggin03158cd2007-10-16 01:25:25 -07002484 index = pos >> PAGE_CACHE_SHIFT;
2485 from = pos & (PAGE_CACHE_SIZE - 1);
2486 to = from + len;
2487
Nick Piggin54566b22009-01-04 12:00:53 -08002488 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Piggin03158cd2007-10-16 01:25:25 -07002489 if (!page)
2490 return -ENOMEM;
2491 *pagep = page;
2492 *fsdata = NULL;
2493
2494 if (page_has_buffers(page)) {
2495 unlock_page(page);
2496 page_cache_release(page);
2497 *pagep = NULL;
2498 return block_write_begin(file, mapping, pos, len, flags, pagep,
2499 fsdata, get_block);
2500 }
Nick Piggina4b06722007-10-16 01:24:48 -07002501
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 if (PageMappedToDisk(page))
2503 return 0;
2504
Nick Piggina4b06722007-10-16 01:24:48 -07002505 /*
2506 * Allocate buffers so that we can keep track of state, and potentially
2507 * attach them to the page if an error occurs. In the common case of
2508 * no error, they will just be freed again without ever being attached
2509 * to the page (which is all OK, because we're under the page lock).
2510 *
2511 * Be careful: the buffer linked list is a NULL terminated one, rather
2512 * than the circular one we're used to.
2513 */
2514 head = alloc_page_buffers(page, blocksize, 0);
Nick Piggin03158cd2007-10-16 01:25:25 -07002515 if (!head) {
2516 ret = -ENOMEM;
2517 goto out_release;
2518 }
Nick Piggina4b06722007-10-16 01:24:48 -07002519
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521
2522 /*
2523 * We loop across all blocks in the page, whether or not they are
2524 * part of the affected region. This is so we can discover if the
2525 * page is fully mapped-to-disk.
2526 */
Nick Piggina4b06722007-10-16 01:24:48 -07002527 for (block_start = 0, block_in_page = 0, bh = head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 block_start < PAGE_CACHE_SIZE;
Nick Piggina4b06722007-10-16 01:24:48 -07002529 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 int create;
2531
Nick Piggina4b06722007-10-16 01:24:48 -07002532 block_end = block_start + blocksize;
2533 bh->b_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534 create = 1;
2535 if (block_start >= to)
2536 create = 0;
2537 ret = get_block(inode, block_in_file + block_in_page,
Nick Piggina4b06722007-10-16 01:24:48 -07002538 bh, create);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 if (ret)
2540 goto failed;
Nick Piggina4b06722007-10-16 01:24:48 -07002541 if (!buffer_mapped(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 is_mapped_to_disk = 0;
Nick Piggina4b06722007-10-16 01:24:48 -07002543 if (buffer_new(bh))
2544 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2545 if (PageUptodate(page)) {
2546 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 continue;
Nick Piggina4b06722007-10-16 01:24:48 -07002548 }
2549 if (buffer_new(bh) || !buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002550 zero_user_segments(page, block_start, from,
2551 to, block_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 continue;
2553 }
Nick Piggina4b06722007-10-16 01:24:48 -07002554 if (buffer_uptodate(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 continue; /* reiserfs does this */
2556 if (block_start < from || block_end > to) {
Nick Piggina4b06722007-10-16 01:24:48 -07002557 lock_buffer(bh);
2558 bh->b_end_io = end_buffer_read_nobh;
2559 submit_bh(READ, bh);
2560 nr_reads++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 }
2562 }
2563
2564 if (nr_reads) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565 /*
2566 * The page is locked, so these buffers are protected from
2567 * any VM or truncate activity. Hence we don't need to care
2568 * for the buffer_head refcounts.
2569 */
Nick Piggina4b06722007-10-16 01:24:48 -07002570 for (bh = head; bh; bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 wait_on_buffer(bh);
2572 if (!buffer_uptodate(bh))
2573 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 }
2575 if (ret)
2576 goto failed;
2577 }
2578
2579 if (is_mapped_to_disk)
2580 SetPageMappedToDisk(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581
Nick Piggin03158cd2007-10-16 01:25:25 -07002582 *fsdata = head; /* to be released by nobh_write_end */
Nick Piggina4b06722007-10-16 01:24:48 -07002583
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 return 0;
2585
2586failed:
Nick Piggin03158cd2007-10-16 01:25:25 -07002587 BUG_ON(!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 /*
Nick Piggina4b06722007-10-16 01:24:48 -07002589 * Error recovery is a bit difficult. We need to zero out blocks that
2590 * were newly allocated, and dirty them to ensure they get written out.
2591 * Buffers need to be attached to the page at this point, otherwise
2592 * the handling of potential IO errors during writeout would be hard
2593 * (could try doing synchronous writeout, but what if that fails too?)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002595 attach_nobh_buffers(page, head);
2596 page_zero_new_buffers(page, from, to);
Nick Piggina4b06722007-10-16 01:24:48 -07002597
Nick Piggin03158cd2007-10-16 01:25:25 -07002598out_release:
2599 unlock_page(page);
2600 page_cache_release(page);
2601 *pagep = NULL;
Nick Piggina4b06722007-10-16 01:24:48 -07002602
Nick Piggin03158cd2007-10-16 01:25:25 -07002603 if (pos + len > inode->i_size)
2604 vmtruncate(inode, inode->i_size);
Nick Piggina4b06722007-10-16 01:24:48 -07002605
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 return ret;
2607}
Nick Piggin03158cd2007-10-16 01:25:25 -07002608EXPORT_SYMBOL(nobh_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609
Nick Piggin03158cd2007-10-16 01:25:25 -07002610int nobh_write_end(struct file *file, struct address_space *mapping,
2611 loff_t pos, unsigned len, unsigned copied,
2612 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613{
2614 struct inode *inode = page->mapping->host;
Nick Pigginefdc3132007-10-21 06:57:41 +02002615 struct buffer_head *head = fsdata;
Nick Piggin03158cd2007-10-16 01:25:25 -07002616 struct buffer_head *bh;
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002617 BUG_ON(fsdata != NULL && page_has_buffers(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618
Dave Kleikampd4cf1092009-02-06 14:59:26 -06002619 if (unlikely(copied < len) && head)
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002620 attach_nobh_buffers(page, head);
2621 if (page_has_buffers(page))
2622 return generic_write_end(file, mapping, pos, len,
2623 copied, page, fsdata);
Nick Piggina4b06722007-10-16 01:24:48 -07002624
Nick Piggin22c8ca72007-02-20 13:58:09 -08002625 SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 set_page_dirty(page);
Nick Piggin03158cd2007-10-16 01:25:25 -07002627 if (pos+copied > inode->i_size) {
2628 i_size_write(inode, pos+copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 mark_inode_dirty(inode);
2630 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002631
2632 unlock_page(page);
2633 page_cache_release(page);
2634
Nick Piggin03158cd2007-10-16 01:25:25 -07002635 while (head) {
2636 bh = head;
2637 head = head->b_this_page;
2638 free_buffer_head(bh);
2639 }
2640
2641 return copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642}
Nick Piggin03158cd2007-10-16 01:25:25 -07002643EXPORT_SYMBOL(nobh_write_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644
2645/*
 2646 * nobh_writepage() - based on block_write_full_page() except
2647 * that it tries to operate without attaching bufferheads to
2648 * the page.
2649 */
2650int nobh_writepage(struct page *page, get_block_t *get_block,
2651 struct writeback_control *wbc)
2652{
2653 struct inode * const inode = page->mapping->host;
2654 loff_t i_size = i_size_read(inode);
2655 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2656 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 int ret;
2658
2659 /* Is the page fully inside i_size? */
2660 if (page->index < end_index)
2661 goto out;
2662
2663 /* Is the page fully outside i_size? (truncate in progress) */
2664 offset = i_size & (PAGE_CACHE_SIZE-1);
2665 if (page->index >= end_index+1 || !offset) {
2666 /*
2667 * The page may have dirty, unmapped buffers. For example,
2668 * they may have been added in ext3_writepage(). Make them
2669 * freeable here, so the page does not leak.
2670 */
2671#if 0
2672 /* Not really sure about this - do we need this ? */
2673 if (page->mapping->a_ops->invalidatepage)
2674 page->mapping->a_ops->invalidatepage(page, offset);
2675#endif
2676 unlock_page(page);
2677 return 0; /* don't care */
2678 }
2679
2680 /*
2681 * The page straddles i_size. It must be zeroed out on each and every
2682 * writepage invocation because it may be mmapped. "A file is mapped
2683 * in multiples of the page size. For a file that is not a multiple of
2684 * the page size, the remaining memory is zeroed when mapped, and
2685 * writes to that region are not written out to the file."
2686 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002687 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688out:
2689 ret = mpage_writepage(page, get_block, wbc);
2690 if (ret == -EAGAIN)
Chris Mason35c80d52009-04-15 13:22:38 -04002691 ret = __block_write_full_page(inode, page, get_block, wbc,
2692 end_buffer_async_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 return ret;
2694}
2695EXPORT_SYMBOL(nobh_writepage);
2696
Nick Piggin03158cd2007-10-16 01:25:25 -07002697int nobh_truncate_page(struct address_space *mapping,
2698 loff_t from, get_block_t *get_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2701 unsigned offset = from & (PAGE_CACHE_SIZE-1);
Nick Piggin03158cd2007-10-16 01:25:25 -07002702 unsigned blocksize;
2703 sector_t iblock;
2704 unsigned length, pos;
2705 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706 struct page *page;
Nick Piggin03158cd2007-10-16 01:25:25 -07002707 struct buffer_head map_bh;
2708 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709
Nick Piggin03158cd2007-10-16 01:25:25 -07002710 blocksize = 1 << inode->i_blkbits;
2711 length = offset & (blocksize - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712
Nick Piggin03158cd2007-10-16 01:25:25 -07002713 /* Block boundary? Nothing to do */
2714 if (!length)
2715 return 0;
2716
2717 length = blocksize - length;
2718 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2719
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720 page = grab_cache_page(mapping, index);
Nick Piggin03158cd2007-10-16 01:25:25 -07002721 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 if (!page)
2723 goto out;
2724
Nick Piggin03158cd2007-10-16 01:25:25 -07002725 if (page_has_buffers(page)) {
2726has_buffers:
2727 unlock_page(page);
2728 page_cache_release(page);
2729 return block_truncate_page(mapping, from, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002731
2732 /* Find the buffer that contains "offset" */
2733 pos = blocksize;
2734 while (offset >= pos) {
2735 iblock++;
2736 pos += blocksize;
2737 }
2738
2739 err = get_block(inode, iblock, &map_bh, 0);
2740 if (err)
2741 goto unlock;
2742 /* unmapped? It's a hole - nothing to do */
2743 if (!buffer_mapped(&map_bh))
2744 goto unlock;
2745
2746 /* Ok, it's mapped. Make sure it's up-to-date */
2747 if (!PageUptodate(page)) {
2748 err = mapping->a_ops->readpage(NULL, page);
2749 if (err) {
2750 page_cache_release(page);
2751 goto out;
2752 }
2753 lock_page(page);
2754 if (!PageUptodate(page)) {
2755 err = -EIO;
2756 goto unlock;
2757 }
2758 if (page_has_buffers(page))
2759 goto has_buffers;
2760 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002761 zero_user(page, offset, length);
Nick Piggin03158cd2007-10-16 01:25:25 -07002762 set_page_dirty(page);
2763 err = 0;
2764
2765unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 unlock_page(page);
2767 page_cache_release(page);
2768out:
Nick Piggin03158cd2007-10-16 01:25:25 -07002769 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770}
2771EXPORT_SYMBOL(nobh_truncate_page);
2772
2773int block_truncate_page(struct address_space *mapping,
2774 loff_t from, get_block_t *get_block)
2775{
2776 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2777 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2778 unsigned blocksize;
Andrew Morton54b21a72006-01-08 01:03:05 -08002779 sector_t iblock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 unsigned length, pos;
2781 struct inode *inode = mapping->host;
2782 struct page *page;
2783 struct buffer_head *bh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 int err;
2785
2786 blocksize = 1 << inode->i_blkbits;
2787 length = offset & (blocksize - 1);
2788
2789 /* Block boundary? Nothing to do */
2790 if (!length)
2791 return 0;
2792
2793 length = blocksize - length;
Andrew Morton54b21a72006-01-08 01:03:05 -08002794 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795
2796 page = grab_cache_page(mapping, index);
2797 err = -ENOMEM;
2798 if (!page)
2799 goto out;
2800
2801 if (!page_has_buffers(page))
2802 create_empty_buffers(page, blocksize, 0);
2803
2804 /* Find the buffer that contains "offset" */
2805 bh = page_buffers(page);
2806 pos = blocksize;
2807 while (offset >= pos) {
2808 bh = bh->b_this_page;
2809 iblock++;
2810 pos += blocksize;
2811 }
2812
2813 err = 0;
2814 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002815 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 err = get_block(inode, iblock, bh, 0);
2817 if (err)
2818 goto unlock;
2819 /* unmapped? It's a hole - nothing to do */
2820 if (!buffer_mapped(bh))
2821 goto unlock;
2822 }
2823
2824 /* Ok, it's mapped. Make sure it's up-to-date */
2825 if (PageUptodate(page))
2826 set_buffer_uptodate(bh);
2827
David Chinner33a266d2007-02-12 00:51:41 -08002828 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 err = -EIO;
2830 ll_rw_block(READ, 1, &bh);
2831 wait_on_buffer(bh);
2832 /* Uhhuh. Read error. Complain and punt. */
2833 if (!buffer_uptodate(bh))
2834 goto unlock;
2835 }
2836
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002837 zero_user(page, offset, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 mark_buffer_dirty(bh);
2839 err = 0;
2840
2841unlock:
2842 unlock_page(page);
2843 page_cache_release(page);
2844out:
2845 return err;
2846}
2847
2848/*
 2849 * The generic ->writepage function for buffer-backed address_spaces.
Chris Mason35c80d52009-04-15 13:22:38 -04002850 * This form passes in the end_io handler used to finish the IO.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851 */
Chris Mason35c80d52009-04-15 13:22:38 -04002852int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2853 struct writeback_control *wbc, bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854{
2855 struct inode * const inode = page->mapping->host;
2856 loff_t i_size = i_size_read(inode);
2857 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2858 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859
2860 /* Is the page fully inside i_size? */
2861 if (page->index < end_index)
Chris Mason35c80d52009-04-15 13:22:38 -04002862 return __block_write_full_page(inode, page, get_block, wbc,
2863 handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864
2865 /* Is the page fully outside i_size? (truncate in progress) */
2866 offset = i_size & (PAGE_CACHE_SIZE-1);
2867 if (page->index >= end_index+1 || !offset) {
2868 /*
2869 * The page may have dirty, unmapped buffers. For example,
2870 * they may have been added in ext3_writepage(). Make them
2871 * freeable here, so the page does not leak.
2872 */
Jan Karaaaa40592005-10-30 15:00:16 -08002873 do_invalidatepage(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 unlock_page(page);
2875 return 0; /* don't care */
2876 }
2877
2878 /*
2879 * The page straddles i_size. It must be zeroed out on each and every
 2880 * writepage invocation because it may be mmapped. "A file is mapped
2881 * in multiples of the page size. For a file that is not a multiple of
2882 * the page size, the remaining memory is zeroed when mapped, and
2883 * writes to that region are not written out to the file."
2884 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002885 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Chris Mason35c80d52009-04-15 13:22:38 -04002886 return __block_write_full_page(inode, page, get_block, wbc, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887}
2888
Chris Mason35c80d52009-04-15 13:22:38 -04002889/*
2890 * The generic ->writepage function for buffer-backed address_spaces
2891 */
2892int block_write_full_page(struct page *page, get_block_t *get_block,
2893 struct writeback_control *wbc)
2894{
2895 return block_write_full_page_endio(page, get_block, wbc,
2896 end_buffer_async_write);
2897}
2898
2899
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2901 get_block_t *get_block)
2902{
2903 struct buffer_head tmp;
2904 struct inode *inode = mapping->host;
2905 tmp.b_state = 0;
2906 tmp.b_blocknr = 0;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002907 tmp.b_size = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908 get_block(inode, block, &tmp, 0);
2909 return tmp.b_blocknr;
2910}
2911
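/*
 * Illustrative sketch (not part of the original file): ->bmap is
 * normally just this wrapper.  Note that generic_block_bmap() ignores
 * get_block errors and reports unmapped blocks as 0.
 */
static sector_t foo_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, foo_get_block);
}
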
NeilBrown6712ecf2007-09-27 12:47:43 +02002912static void end_bio_bh_io_sync(struct bio *bio, int err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913{
2914 struct buffer_head *bh = bio->bi_private;
2915
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 if (err == -EOPNOTSUPP) {
2917 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2918 set_bit(BH_Eopnotsupp, &bh->b_state);
2919 }
2920
Keith Mannthey08bafc02008-11-25 10:24:35 +01002921 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2922 set_bit(BH_Quiet, &bh->b_state);
2923
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2925 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926}
2927
2928int submit_bh(int rw, struct buffer_head * bh)
2929{
2930 struct bio *bio;
2931 int ret = 0;
2932
2933 BUG_ON(!buffer_locked(bh));
2934 BUG_ON(!buffer_mapped(bh));
2935 BUG_ON(!bh->b_end_io);
Aneesh Kumar K.V8fb0e342009-05-12 16:22:37 -04002936 BUG_ON(buffer_delay(bh));
2937 BUG_ON(buffer_unwritten(bh));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938
Jens Axboe48fd4f92008-08-22 10:00:36 +02002939 /*
2940 * Mask in barrier bit for a write (could be either a WRITE or a
 2941 * WRITE_SYNC)
2942 */
2943 if (buffer_ordered(bh) && (rw & WRITE))
2944 rw |= WRITE_BARRIER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
2946 /*
Jens Axboe48fd4f92008-08-22 10:00:36 +02002947 * Only clear out a write error when rewriting
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 */
Jens Axboe48fd4f92008-08-22 10:00:36 +02002949 if (test_set_buffer_req(bh) && (rw & WRITE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 clear_buffer_write_io_error(bh);
2951
2952 /*
2953 * from here on down, it's all bio -- do the initial mapping,
2954 * submit_bio -> generic_make_request may further map this bio around
2955 */
2956 bio = bio_alloc(GFP_NOIO, 1);
2957
2958 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2959 bio->bi_bdev = bh->b_bdev;
2960 bio->bi_io_vec[0].bv_page = bh->b_page;
2961 bio->bi_io_vec[0].bv_len = bh->b_size;
2962 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2963
2964 bio->bi_vcnt = 1;
2965 bio->bi_idx = 0;
2966 bio->bi_size = bh->b_size;
2967
2968 bio->bi_end_io = end_bio_bh_io_sync;
2969 bio->bi_private = bh;
2970
2971 bio_get(bio);
2972 submit_bio(rw, bio);
2973
2974 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2975 ret = -EOPNOTSUPP;
2976
2977 bio_put(bio);
2978 return ret;
2979}
2980
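/*
 * Illustrative sketch (not part of the original file): a synchronous
 * single-buffer read built on submit_bh(), similar in spirit to what
 * __bread() ends up doing.  The caller must hold a reference on bh.
 */
static int foo_read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);     /* end_buffer_read_sync drops this reference */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}
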
2981/**
2982 * ll_rw_block: low-level access to block devices (DEPRECATED)
Jan Karaa7662232005-09-06 15:19:10 -07002983 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 * @nr: number of &struct buffer_heads in the array
2985 * @bhs: array of pointers to &struct buffer_head
2986 *
Jan Karaa7662232005-09-06 15:19:10 -07002987 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2988 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2989 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2990 * are sent to disk. The fourth %READA option is described in the documentation
2991 * for generic_make_request() which ll_rw_block() calls.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 *
2993 * This function drops any buffer that it cannot get a lock on (with the
Jan Karaa7662232005-09-06 15:19:10 -07002994 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2995 * clean when doing a write request, and any buffer that appears to be
 2996 * up-to-date when doing a read request. Further, it marks as clean the
 2997 * buffers that it processes for writing (the buffer cache won't assume they are
2998 * actually clean until the buffer gets unlocked).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 *
 3000 * ll_rw_block sets b_end_io to a simple completion handler that marks
 3001 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3002 * any waiters.
3003 *
 3004 * All of the buffers must be for the same device, and their size must be a
 3005 * multiple of the current approved size for the device.
3006 */
3007void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3008{
3009 int i;
3010
3011 for (i = 0; i < nr; i++) {
3012 struct buffer_head *bh = bhs[i];
3013
Jens Axboe9cf6b722009-04-06 14:48:03 +02003014 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
Jan Karaa7662232005-09-06 15:19:10 -07003015 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02003016 else if (!trylock_buffer(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017 continue;
3018
Jens Axboe9cf6b722009-04-06 14:48:03 +02003019 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3020 rw == SWRITE_SYNC_PLUG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021 if (test_clear_buffer_dirty(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07003022 bh->b_end_io = end_buffer_write_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08003023 get_bh(bh);
Jens Axboe18ce3752008-07-01 09:07:34 +02003024 if (rw == SWRITE_SYNC)
3025 submit_bh(WRITE_SYNC, bh);
3026 else
3027 submit_bh(WRITE, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 continue;
3029 }
3030 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 if (!buffer_uptodate(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07003032 bh->b_end_io = end_buffer_read_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08003033 get_bh(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 submit_bh(rw, bh);
3035 continue;
3036 }
3037 }
3038 unlock_buffer(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 }
3040}
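
/*
 * A minimal sketch of the common ll_rw_block() read pattern (the
 * example_read_bhs() name is purely illustrative): start reads on a
 * batch of buffers, then wait on each one and check that it came back
 * up to date.  Buffers that ll_rw_block() skipped, because they were
 * already up-to-date or locked by another thread, are still covered
 * by the wait loop.
 */
static __maybe_unused int example_read_bhs(struct buffer_head *bhs[], int nr)
{
	int i;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}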
3041
3042/*
 3043 * For a data-integrity writeout, we need to wait upon any in-progress I/O,
 3044 * then start new I/O and wait upon it. The caller must have a ref on
3045 * the buffer_head.
3046 */
3047int sync_dirty_buffer(struct buffer_head *bh)
3048{
3049 int ret = 0;
3050
3051 WARN_ON(atomic_read(&bh->b_count) < 1);
3052 lock_buffer(bh);
3053 if (test_clear_buffer_dirty(bh)) {
3054 get_bh(bh);
3055 bh->b_end_io = end_buffer_write_sync;
Jens Axboe1aa2a7c2009-04-06 14:48:08 +02003056 ret = submit_bh(WRITE_SYNC, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057 wait_on_buffer(bh);
3058 if (buffer_eopnotsupp(bh)) {
3059 clear_buffer_eopnotsupp(bh);
3060 ret = -EOPNOTSUPP;
3061 }
3062 if (!ret && !buffer_uptodate(bh))
3063 ret = -EIO;
3064 } else {
3065 unlock_buffer(bh);
3066 }
3067 return ret;
3068}
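
/*
 * A minimal sketch of a typical sync_dirty_buffer() call site (the
 * example_update_block() name is purely illustrative): a filesystem
 * modifies a metadata block in place and forces it to disk before
 * continuing, as superblock-update and fsync-style paths do.
 */
static __maybe_unused int example_update_block(struct buffer_head *bh)
{
	/* ... modify bh->b_data under the appropriate locks ... */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* 0 on success, -EIO or -EOPNOTSUPP */
}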
3069
3070/*
3071 * try_to_free_buffers() checks if all the buffers on this particular page
3072 * are unused, and releases them if so.
3073 *
3074 * Exclusion against try_to_free_buffers may be obtained by either
3075 * locking the page or by holding its mapping's private_lock.
3076 *
3077 * If the page is dirty but all the buffers are clean then we need to
3078 * be sure to mark the page clean as well. This is because the page
3079 * may be against a block device, and a later reattachment of buffers
3080 * to a dirty page will set *all* buffers dirty. Which would corrupt
3081 * filesystem data on the same device.
3082 *
3083 * The same applies to regular filesystem pages: if all the buffers are
3084 * clean then we set the page clean and proceed. To do that, we require
3085 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3086 * private_lock.
3087 *
3088 * try_to_free_buffers() is non-blocking.
3089 */
3090static inline int buffer_busy(struct buffer_head *bh)
3091{
3092 return atomic_read(&bh->b_count) |
3093 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3094}
3095
3096static int
3097drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3098{
3099 struct buffer_head *head = page_buffers(page);
3100 struct buffer_head *bh;
3101
3102 bh = head;
3103 do {
akpm@osdl.orgde7d5a32005-05-01 08:58:39 -07003104 if (buffer_write_io_error(bh) && page->mapping)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 set_bit(AS_EIO, &page->mapping->flags);
3106 if (buffer_busy(bh))
3107 goto failed;
3108 bh = bh->b_this_page;
3109 } while (bh != head);
3110
3111 do {
3112 struct buffer_head *next = bh->b_this_page;
3113
Jan Kara535ee2f2008-02-08 04:21:59 -08003114 if (bh->b_assoc_map)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115 __remove_assoc_queue(bh);
3116 bh = next;
3117 } while (bh != head);
3118 *buffers_to_free = head;
3119 __clear_page_buffers(page);
3120 return 1;
3121failed:
3122 return 0;
3123}
3124
3125int try_to_free_buffers(struct page *page)
3126{
3127 struct address_space * const mapping = page->mapping;
3128 struct buffer_head *buffers_to_free = NULL;
3129 int ret = 0;
3130
3131 BUG_ON(!PageLocked(page));
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003132 if (PageWriteback(page))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133 return 0;
3134
3135 if (mapping == NULL) { /* can this still happen? */
3136 ret = drop_buffers(page, &buffers_to_free);
3137 goto out;
3138 }
3139
3140 spin_lock(&mapping->private_lock);
3141 ret = drop_buffers(page, &buffers_to_free);
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003142
3143 /*
3144 * If the filesystem writes its buffers by hand (eg ext3)
3145 * then we can have clean buffers against a dirty page. We
3146 * clean the page here; otherwise the VM will never notice
3147 * that the filesystem did any IO at all.
3148 *
3149 * Also, during truncate, discard_buffer will have marked all
3150 * the page's buffers clean. We discover that here and clean
3151 * the page also.
Nick Piggin87df7242007-01-30 14:36:27 +11003152 *
3153 * private_lock must be held over this entire operation in order
3154 * to synchronise against __set_page_dirty_buffers and prevent the
3155 * dirty bit from being lost.
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003156 */
3157 if (ret)
3158 cancel_dirty_page(page, PAGE_CACHE_SIZE);
Nick Piggin87df7242007-01-30 14:36:27 +11003159 spin_unlock(&mapping->private_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160out:
3161 if (buffers_to_free) {
3162 struct buffer_head *bh = buffers_to_free;
3163
3164 do {
3165 struct buffer_head *next = bh->b_this_page;
3166 free_buffer_head(bh);
3167 bh = next;
3168 } while (bh != buffers_to_free);
3169 }
3170 return ret;
3171}
3172EXPORT_SYMBOL(try_to_free_buffers);
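
/*
 * A minimal sketch of a filesystem ->releasepage() built on
 * try_to_free_buffers() (the example_releasepage() name is purely
 * illustrative).  The caller holds the page lock, which is one of the
 * two forms of exclusion described above.
 */
static __maybe_unused int example_releasepage(struct page *page, gfp_t gfp)
{
	if (page_has_buffers(page))
		return try_to_free_buffers(page);
	return 1;	/* no buffers, so nothing pins this page */
}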
3173
NeilBrown3978d712006-03-26 01:37:17 -08003174void block_sync_page(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175{
3176 struct address_space *mapping;
3177
3178 smp_mb();
3179 mapping = page_mapping(page);
3180 if (mapping)
3181 blk_run_backing_dev(mapping->backing_dev_info, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182}
3183
3184/*
3185 * There are no bdflush tunables left. But distributions are
3186 * still running obsolete flush daemons, so we terminate them here.
3187 *
3188 * Use of bdflush() is deprecated and will be removed in a future kernel.
3189 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3190 */
Heiko Carstensbdc480e2009-01-14 14:14:12 +01003191SYSCALL_DEFINE2(bdflush, int, func, long, data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192{
3193 static int msg_count;
3194
3195 if (!capable(CAP_SYS_ADMIN))
3196 return -EPERM;
3197
3198 if (msg_count < 5) {
3199 msg_count++;
3200 printk(KERN_INFO
3201 "warning: process `%s' used the obsolete bdflush"
3202 " system call\n", current->comm);
3203 printk(KERN_INFO "Fix your initscripts?\n");
3204 }
3205
3206 if (func == 1)
3207 do_exit(0);
3208 return 0;
3209}
3210
3211/*
3212 * Buffer-head allocation
3213 */
Christoph Lametere18b8902006-12-06 20:33:20 -08003214static struct kmem_cache *bh_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215
3216/*
3217 * Once the number of bh's in the machine exceeds this level, we start
3218 * stripping them in writeback.
3219 */
3220static int max_buffer_heads;
3221
3222int buffer_heads_over_limit;
3223
3224struct bh_accounting {
3225 int nr; /* Number of live bh's */
3226 int ratelimit; /* Limit cacheline bouncing */
3227};
3228
3229static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3230
3231static void recalc_bh_state(void)
3232{
3233 int i;
3234 int tot = 0;
3235
3236 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3237 return;
3238 __get_cpu_var(bh_accounting).ratelimit = 0;
Eric Dumazet8a143422006-03-24 03:18:10 -08003239 for_each_online_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240 tot += per_cpu(bh_accounting, i).nr;
3241 buffer_heads_over_limit = (tot > max_buffer_heads);
3242}
3243
Al Virodd0fc662005-10-07 07:46:04 +01003244struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245{
Christoph Lameter488514d2008-04-28 02:12:05 -07003246 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 if (ret) {
Christoph Lametera35afb82007-05-16 22:10:57 -07003248 INIT_LIST_HEAD(&ret->b_assoc_buffers);
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003249 get_cpu_var(bh_accounting).nr++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250 recalc_bh_state();
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003251 put_cpu_var(bh_accounting);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003252 }
3253 return ret;
3254}
3255EXPORT_SYMBOL(alloc_buffer_head);
3256
3257void free_buffer_head(struct buffer_head *bh)
3258{
3259 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3260 kmem_cache_free(bh_cachep, bh);
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003261 get_cpu_var(bh_accounting).nr--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262 recalc_bh_state();
Coywolf Qi Hunt736c7b82005-09-06 15:18:17 -07003263 put_cpu_var(bh_accounting);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264}
3265EXPORT_SYMBOL(free_buffer_head);
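
/*
 * A minimal sketch of the alloc_buffer_head()/free_buffer_head()
 * pairing (the example_alloc_bh() name is purely illustrative).  The
 * bh comes back zeroed, with b_assoc_buffers initialised by the slab
 * constructor, and that list must be empty again by the time
 * free_buffer_head() runs or the BUG_ON() above fires.
 */
static __maybe_unused struct buffer_head *example_alloc_bh(void)
{
	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);

	if (!bh)
		return NULL;
	/* ... set up b_page, b_bdev, b_blocknr, b_size as needed ... */
	return bh;		/* later: free_buffer_head(bh) */
}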
3266
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267static void buffer_exit_cpu(int cpu)
3268{
3269 int i;
3270 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3271
3272 for (i = 0; i < BH_LRU_SIZE; i++) {
3273 brelse(b->bhs[i]);
3274 b->bhs[i] = NULL;
3275 }
Eric Dumazet8a143422006-03-24 03:18:10 -08003276 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3277 per_cpu(bh_accounting, cpu).nr = 0;
3278 put_cpu_var(bh_accounting);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003279}
3280
3281static int buffer_cpu_notify(struct notifier_block *self,
3282 unsigned long action, void *hcpu)
3283{
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003284 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285 buffer_exit_cpu((unsigned long)hcpu);
3286 return NOTIFY_OK;
3287}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003289/**
Randy Dunlapa6b91912008-03-19 17:01:00 -07003290 * bh_uptodate_or_lock - Test whether the buffer is uptodate
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003291 * @bh: struct buffer_head
3292 *
 3293 * Returns 1 if the buffer is up-to-date; otherwise returns 0
 3294 * with the buffer locked.
3295 */
3296int bh_uptodate_or_lock(struct buffer_head *bh)
3297{
3298 if (!buffer_uptodate(bh)) {
3299 lock_buffer(bh);
3300 if (!buffer_uptodate(bh))
3301 return 0;
3302 unlock_buffer(bh);
3303 }
3304 return 1;
3305}
3306EXPORT_SYMBOL(bh_uptodate_or_lock);
3307
3308/**
Randy Dunlapa6b91912008-03-19 17:01:00 -07003309 * bh_submit_read - Submit a locked buffer for reading
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003310 * @bh: struct buffer_head
3311 *
3312 * Returns zero on success and -EIO on error.
3313 */
3314int bh_submit_read(struct buffer_head *bh)
3315{
3316 BUG_ON(!buffer_locked(bh));
3317
3318 if (buffer_uptodate(bh)) {
3319 unlock_buffer(bh);
3320 return 0;
3321 }
3322
3323 get_bh(bh);
3324 bh->b_end_io = end_buffer_read_sync;
3325 submit_bh(READ, bh);
3326 wait_on_buffer(bh);
3327 if (buffer_uptodate(bh))
3328 return 0;
3329 return -EIO;
3330}
3331EXPORT_SYMBOL(bh_submit_read);
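
/*
 * A minimal sketch combining the two helpers above (the
 * example_read_block() name is purely illustrative): the standard
 * "read this block unless it is already cached" sequence for
 * filesystem metadata.
 */
static __maybe_unused int example_read_block(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;	/* already up to date, nothing to do */
	/* bh is now locked; bh_submit_read() unlocks it on completion */
	return bh_submit_read(bh);
}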
3332
Christoph Lameterb98938c2008-02-04 22:28:36 -08003333static void
Alexey Dobriyan51cc5062008-07-25 19:45:34 -07003334init_buffer_head(void *data)
Christoph Lameterb98938c2008-02-04 22:28:36 -08003335{
3336 struct buffer_head *bh = data;
3337
3338 memset(bh, 0, sizeof(*bh));
3339 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3340}
3341
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342void __init buffer_init(void)
3343{
3344 int nrpages;
3345
Christoph Lameterb98938c2008-02-04 22:28:36 -08003346 bh_cachep = kmem_cache_create("buffer_head",
3347 sizeof(struct buffer_head), 0,
3348 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3349 SLAB_MEM_SPREAD),
3350 init_buffer_head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351
3352 /*
3353 * Limit the bh occupancy to 10% of ZONE_NORMAL
3354 */
3355 nrpages = (nr_free_buffer_pages() * 10) / 100;
3356 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3357 hotcpu_notifier(buffer_cpu_notify, 0);
3358}
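
/*
 * A worked example of the sizing above (assuming, purely for
 * illustration, 4KiB pages and a 64-byte struct buffer_head): a
 * machine with 1GiB of ZONE_NORMAL has ~262144 buffer pages, so
 * nrpages is ~26214 and max_buffer_heads is ~26214 * (4096 / 64),
 * i.e. about 1.7 million buffer heads before buffer_heads_over_limit
 * trips and writeback starts stripping them.
 */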
3359
3360EXPORT_SYMBOL(__bforget);
3361EXPORT_SYMBOL(__brelse);
3362EXPORT_SYMBOL(__wait_on_buffer);
3363EXPORT_SYMBOL(block_commit_write);
3364EXPORT_SYMBOL(block_prepare_write);
David Chinner54171692007-07-19 17:39:55 +10003365EXPORT_SYMBOL(block_page_mkwrite);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366EXPORT_SYMBOL(block_read_full_page);
3367EXPORT_SYMBOL(block_sync_page);
3368EXPORT_SYMBOL(block_truncate_page);
3369EXPORT_SYMBOL(block_write_full_page);
Chris Mason35c80d52009-04-15 13:22:38 -04003370EXPORT_SYMBOL(block_write_full_page_endio);
Nick Piggin89e10782007-10-16 01:25:07 -07003371EXPORT_SYMBOL(cont_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372EXPORT_SYMBOL(end_buffer_read_sync);
3373EXPORT_SYMBOL(end_buffer_write_sync);
Chris Mason35c80d52009-04-15 13:22:38 -04003374EXPORT_SYMBOL(end_buffer_async_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375EXPORT_SYMBOL(file_fsync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376EXPORT_SYMBOL(generic_block_bmap);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08003377EXPORT_SYMBOL(generic_cont_expand_simple);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378EXPORT_SYMBOL(init_buffer);
3379EXPORT_SYMBOL(invalidate_bdev);
3380EXPORT_SYMBOL(ll_rw_block);
3381EXPORT_SYMBOL(mark_buffer_dirty);
3382EXPORT_SYMBOL(submit_bh);
3383EXPORT_SYMBOL(sync_dirty_buffer);
3384EXPORT_SYMBOL(unlock_buffer);