/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

static int sleep_on_buffer(void *word)
{
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
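
/*
 * Illustrative sketch (not part of this file): because __wait_on_buffer()
 * gives no exclusion, a caller that needs a stable view of the buffer
 * typically takes the buffer lock itself afterwards.  The bh here is a
 * hypothetical, already-obtained buffer_head:
 *
 *	wait_on_buffer(bh);
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh))
 *		... examine or copy bh->b_data ...
 *	unlock_buffer(bh);
 */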
Linus Torvalds1da177e2005-04-16 15:20:36 -070088
89static void
90__clear_page_buffers(struct page *page)
91{
92 ClearPagePrivate(page);
Hugh Dickins4c21e2f2005-10-29 18:16:40 -070093 set_page_private(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094 page_cache_release(page);
95}
96
Keith Mannthey08bafc02008-11-25 10:24:35 +010097
98static int quiet_error(struct buffer_head *bh)
99{
100 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
101 return 0;
102 return 1;
103}
104
105
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106static void buffer_io_error(struct buffer_head *bh)
107{
108 char b[BDEVNAME_SIZE];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
110 bdevname(bh->b_bdev, b),
111 (unsigned long long)bh->b_blocknr);
112}
113
114/*
Dmitry Monakhov68671f32007-10-16 01:24:47 -0700115 * End-of-IO handler helper function which does not touch the bh after
116 * unlocking it.
117 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
118 * a race there is benign: unlock_buffer() only use the bh's address for
119 * hashing after unlocking the buffer, so it doesn't actually touch the bh
120 * itself.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121 */
Dmitry Monakhov68671f32007-10-16 01:24:47 -0700122static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123{
124 if (uptodate) {
125 set_buffer_uptodate(bh);
126 } else {
127 /* This happens, due to failed READA attempts. */
128 clear_buffer_uptodate(bh);
129 }
130 unlock_buffer(bh);
Dmitry Monakhov68671f32007-10-16 01:24:47 -0700131}
132
133/*
134 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
135 * unlock the buffer. This is what ll_rw_block uses too.
136 */
137void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
138{
139 __end_buffer_read_notouch(bh, uptodate);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140 put_bh(bh);
141}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -0700142EXPORT_SYMBOL(end_buffer_read_sync);
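
/*
 * Illustrative sketch (assumptions flagged): a synchronous read of a single
 * buffer uses end_buffer_read_sync() as the completion handler, exactly as
 * __bread_slow() does later in this file.  bdev/block/size are hypothetical:
 *
 *	struct buffer_head *bh = __getblk(bdev, block, size);
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *	} else
 *		unlock_buffer(bh);
 */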

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.  (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file I/O on the block device and getblk.  It gets dealt with
	 * elsewhere; don't buffer_error if we had some unmapped buffers.
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk, even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted.  Thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk).  Also for the ramdisk, the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases.  Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	lru_add_drain_all();	/* make sure all lru add caches are flushed */
	invalidate_mapping_pages(mapping, 0, -1);
}
EXPORT_SYMBOL(invalidate_bdev);
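
/*
 * Illustrative sketch (not part of this file): the "normal usage" described
 * above - sync the device, then invalidate clean buffers - is roughly what
 * the BLKFLSBUF ioctl path does for a hypothetical bdev:
 *
 *	fsync_bdev(bdev);
 *	invalidate_bdev(bdev);
 */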

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_flusher_threads(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
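
/*
 * Conceptual sketch (hypothetical caller, not part of this file): the
 * O_SYNC pattern described above queues each write as the buffer is
 * dirtied, then waits once at the end.  bh and the lock/list pair stand
 * in for a real filesystem's associated-buffer list:
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);
 *	... dirty and queue further buffers ...
 *	err = osync_buffers_list(lock, list);
 */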

static void do_thaw_one(struct super_block *sb, void *unused)
{
	char b[BDEVNAME_SIZE];
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
		printk(KERN_WARNING "Emergency Thaw on %s\n",
		       bdevname(sb->s_bdev, b));
}

static void do_thaw_all(struct work_struct *work)
{
	iterate_supers(do_thaw_one, NULL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
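
/*
 * Illustrative sketch (hypothetical fsync method; the signature is an
 * assumption matching this kernel era): a buffer-based filesystem's ->fsync
 * typically flushes the associated metadata buffers through this helper:
 *
 *	static int example_fsync(struct file *file, int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */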

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
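
/*
 * Illustrative sketch (hypothetical filesystem code): an ext2-style
 * filesystem updating an indirect block hangs it off the owning inode so
 * that a later sync_mapping_buffers() from fsync() will write and wait on
 * it.  sb and indirect_blk are assumptions:
 *
 *	struct buffer_head *bh = sb_getblk(sb, indirect_blk);
 *
 *	... modify the block pointers in bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */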

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, WRITE_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/*
	 * We're _really_ low on memory.  Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		if (!TestSetPageDirty(page)) {
			struct address_space *mapping = page_mapping(page);
			if (mapping)
				__set_page_dirty(page, mapping, 0);
		}
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
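
/*
 * Illustrative sketch (hypothetical caller): the usual modify-then-dirty
 * sequence for a metadata block.  The buffer lock keeps the update atomic
 * with respect to writeback; src/off/len are assumptions:
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + off, src, len);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 */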

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;

	check_irqs_on();
	bh_lru_lock();
	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 =
				__this_cpu_read(bh_lrus.bhs[in]);

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size.  The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);
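
/*
 * For reference: filesystems rarely call __getblk() directly.  The
 * sb_getblk() helper in <linux/buffer_head.h> is a thin wrapper that
 * supplies the superblock's device and block size, roughly:
 *
 *	static inline struct buffer_head *
 *	sb_getblk(struct super_block *sb, sector_t block)
 *	{
 *		return __getblk(sb->s_bdev, block, sb->s_blocksize);
 *	}
 */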

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns buffer head that contains it.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
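
/*
 * Illustrative sketch (hypothetical filesystem code): the common pattern
 * is to read through the sb_bread() wrapper, check for failure, use the
 * data, then drop the reference.  sb and blk_nr are assumptions:
 *
 *	struct buffer_head *bh = sb_bread(sb, blk_nr);
 *
 *	if (!bh)
 *		return -EIO;
 *	... read the on-disk structure at bh->b_data ...
 *	brelse(bh);
 */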
1406
1407/*
1408 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1409 * This doesn't race because it runs in each cpu either in irq
1410 * or with preempt disabled.
1411 */
1412static void invalidate_bh_lru(void *arg)
1413{
1414 struct bh_lru *b = &get_cpu_var(bh_lrus);
1415 int i;
1416
1417 for (i = 0; i < BH_LRU_SIZE; i++) {
1418 brelse(b->bhs[i]);
1419 b->bhs[i] = NULL;
1420 }
1421 put_cpu_var(bh_lrus);
1422}
1423
Peter Zijlstraf9a14392007-05-06 14:49:55 -07001424void invalidate_bh_lrus(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425{
Jens Axboe15c8b6c2008-05-09 09:39:44 +02001426 on_each_cpu(invalidate_bh_lru, NULL, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427}
Nick Piggin9db55792008-02-08 04:19:49 -08001428EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
1430void set_bh_page(struct buffer_head *bh,
1431 struct page *page, unsigned long offset)
1432{
1433 bh->b_page = page;
Eric Sesterhenne827f922006-03-26 18:24:46 +02001434 BUG_ON(offset >= PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 if (PageHighMem(page))
1436 /*
1437 * This catches illegal uses and preserves the offset:
1438 */
1439 bh->b_data = (char *)(0 + offset);
1440 else
1441 bh->b_data = page_address(page) + offset;
1442}
1443EXPORT_SYMBOL(set_bh_page);
1444
1445/*
1446 * Called when truncating a buffer on a page completely.
1447 */
Arjan van de Ven858119e2006-01-14 13:20:43 -08001448static void discard_buffer(struct buffer_head * bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449{
1450 lock_buffer(bh);
1451 clear_buffer_dirty(bh);
1452 bh->b_bdev = NULL;
1453 clear_buffer_mapped(bh);
1454 clear_buffer_req(bh);
1455 clear_buffer_new(bh);
1456 clear_buffer_delay(bh);
David Chinner33a266d2007-02-12 00:51:41 -08001457 clear_buffer_unwritten(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 unlock_buffer(bh);
1459}
1460
1461/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 * block_invalidatepage - invalidate part of all of a buffer-backed page
1463 *
1464 * @page: the page which is affected
1465 * @offset: the index of the truncation point
1466 *
1467 * block_invalidatepage() is called when all or part of the page has become
1468 * invalidatedby a truncate operation.
1469 *
1470 * block_invalidatepage() does not have to release all buffers, but it must
1471 * ensure that no dirty buffer is left outside @offset and that no I/O
1472 * is underway against any of the blocks which are outside the truncation
 1473 * point, because the caller is about to free (and possibly reuse) those
1474 * blocks on-disk.
1475 */
NeilBrown2ff28e22006-03-26 01:37:18 -08001476void block_invalidatepage(struct page *page, unsigned long offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477{
1478 struct buffer_head *head, *bh, *next;
1479 unsigned int curr_off = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480
1481 BUG_ON(!PageLocked(page));
1482 if (!page_has_buffers(page))
1483 goto out;
1484
1485 head = page_buffers(page);
1486 bh = head;
1487 do {
1488 unsigned int next_off = curr_off + bh->b_size;
1489 next = bh->b_this_page;
1490
1491 /*
1492 * is this block fully invalidated?
1493 */
1494 if (offset <= curr_off)
1495 discard_buffer(bh);
1496 curr_off = next_off;
1497 bh = next;
1498 } while (bh != head);
1499
1500 /*
1501 * We release buffers only if the entire page is being invalidated.
1502 * The get_block cached value has been unconditionally invalidated,
1503 * so real IO is not possible anymore.
1504 */
1505 if (offset == 0)
NeilBrown2ff28e22006-03-26 01:37:18 -08001506 try_to_release_page(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507out:
NeilBrown2ff28e22006-03-26 01:37:18 -08001508 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509}
1510EXPORT_SYMBOL(block_invalidatepage);
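
/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A filesystem with no invalidation work of its own can leave
 * ->invalidatepage NULL (do_invalidatepage() then calls
 * block_invalidatepage() itself); one that needs a hook typically ends
 * by delegating here.  "myfs_invalidatepage" is a hypothetical name.
 */
static void myfs_invalidatepage(struct page *page, unsigned long offset)
{
	/* per-filesystem bookkeeping for the truncated range would go here */
	block_invalidatepage(page, offset);
}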
1511
1512/*
1513 * We attach and possibly dirty the buffers atomically wrt
1514 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1515 * is already excluded via the page lock.
1516 */
1517void create_empty_buffers(struct page *page,
1518 unsigned long blocksize, unsigned long b_state)
1519{
1520 struct buffer_head *bh, *head, *tail;
1521
1522 head = alloc_page_buffers(page, blocksize, 1);
1523 bh = head;
1524 do {
1525 bh->b_state |= b_state;
1526 tail = bh;
1527 bh = bh->b_this_page;
1528 } while (bh);
1529 tail->b_this_page = head;
1530
1531 spin_lock(&page->mapping->private_lock);
1532 if (PageUptodate(page) || PageDirty(page)) {
1533 bh = head;
1534 do {
1535 if (PageDirty(page))
1536 set_buffer_dirty(bh);
1537 if (PageUptodate(page))
1538 set_buffer_uptodate(bh);
1539 bh = bh->b_this_page;
1540 } while (bh != head);
1541 }
1542 attach_page_buffers(page, head);
1543 spin_unlock(&page->mapping->private_lock);
1544}
1545EXPORT_SYMBOL(create_empty_buffers);
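
/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * Code that wants to walk a page's buffer ring typically guarantees the
 * buffers exist first, exactly as the helpers later in this file do.
 * "myfs_ensure_buffers" is a hypothetical name; the page is assumed to
 * be locked by the caller.
 */
static void myfs_ensure_buffers(struct page *page, struct inode *inode)
{
	unsigned blocksize = 1 << inode->i_blkbits;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	/* page_buffers(page) may now be used safely */
}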
1546
1547/*
1548 * We are taking a block for data and we don't want any output from any
 1549 * buffer-cache aliases starting from the return of this function and
 1550 * until the moment when something explicitly marks the buffer
 1551 * dirty (hopefully that will not happen until we free that block ;-)
 1552 * We don't even need to mark it not-uptodate - nobody can expect
 1553 * anything from a newly allocated buffer anyway. We used to use
1554 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1555 * don't want to mark the alias unmapped, for example - it would confuse
1556 * anyone who might pick it with bread() afterwards...
1557 *
 1558 * Also note that bforget() doesn't lock the buffer, so there can
1559 * be writeout I/O going on against recently-freed buffers. We don't
1560 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1561 * only if we really need to. That happens here.
1562 */
1563void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1564{
1565 struct buffer_head *old_bh;
1566
1567 might_sleep();
1568
Coywolf Qi Hunt385fd4c2005-11-07 00:59:39 -08001569 old_bh = __find_get_block_slow(bdev, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 if (old_bh) {
1571 clear_buffer_dirty(old_bh);
1572 wait_on_buffer(old_bh);
1573 clear_buffer_req(old_bh);
1574 __brelse(old_bh);
1575 }
1576}
1577EXPORT_SYMBOL(unmap_underlying_metadata);
1578
1579/*
1580 * NOTE! All mapped/uptodate combinations are valid:
1581 *
 1582 *	Mapped		Uptodate	Meaning
 1583 *
 1584 *	No		No		"unknown" - must do get_block()
 1585 *	No		Yes		"hole" - zero-filled
 1586 *	Yes		No		"allocated" - allocated on disk, not read in
 1587 *	Yes		Yes		"valid" - allocated and up-to-date in memory.
1588 *
1589 * "Dirty" is valid only with the last case (mapped+uptodate).
1590 */
1591
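/*
 * Editor's note: an illustrative sketch of the state table above, not
 * part of the original file.  The enum and function names are
 * hypothetical; the predicates are the real buffer_head accessors.
 */
enum bh_class { BH_CLASS_UNKNOWN, BH_CLASS_HOLE, BH_CLASS_ALLOCATED, BH_CLASS_VALID };

static enum bh_class classify_buffer(struct buffer_head *bh)
{
	if (buffer_mapped(bh))
		/* "allocated" needs a read before partial use; "valid" is usable */
		return buffer_uptodate(bh) ? BH_CLASS_VALID : BH_CLASS_ALLOCATED;
	/* unmapped: uptodate means a zero-filled hole, else call get_block() */
	return buffer_uptodate(bh) ? BH_CLASS_HOLE : BH_CLASS_UNKNOWN;
}
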
1592/*
1593 * While block_write_full_page is writing back the dirty buffers under
1594 * the page lock, whoever dirtied the buffers may decide to clean them
1595 * again at any time. We handle that by only looking at the buffer
1596 * state inside lock_buffer().
1597 *
1598 * If block_write_full_page() is called for regular writeback
1599 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 1600 * locked buffer. This can only happen if someone has written the buffer
1601 * directly, with submit_bh(). At the address_space level PageWriteback
1602 * prevents this contention from occurring.
Theodore Ts'o6e34eed2009-04-07 18:12:43 -04001603 *
1604 * If block_write_full_page() is called with wbc->sync_mode ==
Jens Axboe721a9602011-03-09 11:56:30 +01001605 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1606 * causes the writes to be flagged as synchronous writes.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 */
1608static int __block_write_full_page(struct inode *inode, struct page *page,
Chris Mason35c80d52009-04-15 13:22:38 -04001609 get_block_t *get_block, struct writeback_control *wbc,
1610 bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611{
1612 int err;
1613 sector_t block;
1614 sector_t last_block;
Andrew Mortonf0fbd5f2005-05-05 16:15:48 -07001615 struct buffer_head *bh, *head;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001616 const unsigned blocksize = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 int nr_underway = 0;
Theodore Ts'o6e34eed2009-04-07 18:12:43 -04001618 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
Jens Axboe721a9602011-03-09 11:56:30 +01001619 WRITE_SYNC : WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
1621 BUG_ON(!PageLocked(page));
1622
1623 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1624
1625 if (!page_has_buffers(page)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001626 create_empty_buffers(page, blocksize,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 (1 << BH_Dirty)|(1 << BH_Uptodate));
1628 }
1629
1630 /*
1631 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1632 * here, and the (potentially unmapped) buffers may become dirty at
1633 * any time. If a buffer becomes dirty here after we've inspected it
1634 * then we just miss that fact, and the page stays dirty.
1635 *
1636 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1637 * handle that here by just cleaning them.
1638 */
1639
Andrew Morton54b21a72006-01-08 01:03:05 -08001640 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 head = page_buffers(page);
1642 bh = head;
1643
1644 /*
1645 * Get all the dirty buffers mapped to disk addresses and
1646 * handle any aliases from the underlying blockdev's mapping.
1647 */
1648 do {
1649 if (block > last_block) {
1650 /*
1651 * mapped buffers outside i_size will occur, because
1652 * this page can be outside i_size when there is a
1653 * truncate in progress.
1654 */
1655 /*
1656 * The buffer was zeroed by block_write_full_page()
1657 */
1658 clear_buffer_dirty(bh);
1659 set_buffer_uptodate(bh);
Alex Tomas29a814d2008-07-11 19:27:31 -04001660 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1661 buffer_dirty(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001662 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 err = get_block(inode, block, bh, 1);
1664 if (err)
1665 goto recover;
Alex Tomas29a814d2008-07-11 19:27:31 -04001666 clear_buffer_delay(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 if (buffer_new(bh)) {
1668 /* blockdev mappings never come here */
1669 clear_buffer_new(bh);
1670 unmap_underlying_metadata(bh->b_bdev,
1671 bh->b_blocknr);
1672 }
1673 }
1674 bh = bh->b_this_page;
1675 block++;
1676 } while (bh != head);
1677
1678 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 if (!buffer_mapped(bh))
1680 continue;
1681 /*
1682 * If it's a fully non-blocking write attempt and we cannot
1683 * lock the buffer then redirty the page. Note that this can
Jens Axboe5b0830c2009-09-23 19:37:09 +02001684 * potentially cause a busy-wait loop from writeback threads
1685 * and kswapd activity, but those code paths have their own
1686 * higher-level throttling.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 */
Wu Fengguang1b430be2010-10-26 14:21:26 -07001688 if (wbc->sync_mode != WB_SYNC_NONE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02001690 } else if (!trylock_buffer(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 redirty_page_for_writepage(wbc, page);
1692 continue;
1693 }
1694 if (test_clear_buffer_dirty(bh)) {
Chris Mason35c80d52009-04-15 13:22:38 -04001695 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 } else {
1697 unlock_buffer(bh);
1698 }
1699 } while ((bh = bh->b_this_page) != head);
1700
1701 /*
1702 * The page and its buffers are protected by PageWriteback(), so we can
1703 * drop the bh refcounts early.
1704 */
1705 BUG_ON(PageWriteback(page));
1706 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
1708 do {
1709 struct buffer_head *next = bh->b_this_page;
1710 if (buffer_async_write(bh)) {
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001711 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 nr_underway++;
1713 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 bh = next;
1715 } while (bh != head);
Andrew Morton05937ba2005-05-05 16:15:47 -07001716 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
1718 err = 0;
1719done:
1720 if (nr_underway == 0) {
1721 /*
1722 * The page was marked dirty, but the buffers were
1723 * clean. Someone wrote them back by hand with
1724 * ll_rw_block/submit_bh. A rare case.
1725 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 end_page_writeback(page);
Nick Piggin3d67f2d2007-05-06 14:49:05 -07001727
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 /*
1729 * The page and buffer_heads can be released at any time from
1730 * here on.
1731 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 }
1733 return err;
1734
1735recover:
1736 /*
1737 * ENOSPC, or some other error. We may already have added some
1738 * blocks to the file, so we need to write these out to avoid
1739 * exposing stale data.
1740 * The page is currently locked and not marked for writeback
1741 */
1742 bh = head;
1743 /* Recovery: lock and submit the mapped buffers */
1744 do {
Alex Tomas29a814d2008-07-11 19:27:31 -04001745 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1746 !buffer_delay(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 lock_buffer(bh);
Chris Mason35c80d52009-04-15 13:22:38 -04001748 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 } else {
1750 /*
1751 * The buffer may have been set dirty during
1752 * attachment to a dirty page.
1753 */
1754 clear_buffer_dirty(bh);
1755 }
1756 } while ((bh = bh->b_this_page) != head);
1757 SetPageError(page);
1758 BUG_ON(PageWriteback(page));
Andrew Morton7e4c3692007-05-08 00:23:27 -07001759 mapping_set_error(page->mapping, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 do {
1762 struct buffer_head *next = bh->b_this_page;
1763 if (buffer_async_write(bh)) {
1764 clear_buffer_dirty(bh);
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001765 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 nr_underway++;
1767 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 bh = next;
1769 } while (bh != head);
Nick Pigginffda9d32007-02-20 13:57:54 -08001770 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 goto done;
1772}
1773
Nick Pigginafddba42007-10-16 01:25:01 -07001774/*
1775 * If a page has any new buffers, zero them out here, and mark them uptodate
1776 * and dirty so they'll be written out (in order to prevent uninitialised
1777 * block data from leaking). And clear the new bit.
1778 */
1779void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1780{
1781 unsigned int block_start, block_end;
1782 struct buffer_head *head, *bh;
1783
1784 BUG_ON(!PageLocked(page));
1785 if (!page_has_buffers(page))
1786 return;
1787
1788 bh = head = page_buffers(page);
1789 block_start = 0;
1790 do {
1791 block_end = block_start + bh->b_size;
1792
1793 if (buffer_new(bh)) {
1794 if (block_end > from && block_start < to) {
1795 if (!PageUptodate(page)) {
1796 unsigned start, size;
1797
1798 start = max(from, block_start);
1799 size = min(to, block_end) - start;
1800
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001801 zero_user(page, start, size);
Nick Pigginafddba42007-10-16 01:25:01 -07001802 set_buffer_uptodate(bh);
1803 }
1804
1805 clear_buffer_new(bh);
1806 mark_buffer_dirty(bh);
1807 }
1808 }
1809
1810 block_start = block_end;
1811 bh = bh->b_this_page;
1812 } while (bh != head);
1813}
1814EXPORT_SYMBOL(page_zero_new_buffers);
1815
Christoph Hellwigebdec242010-10-06 10:47:23 +02001816int __block_write_begin(struct page *page, loff_t pos, unsigned len,
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001817 get_block_t *get_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818{
Christoph Hellwigebdec242010-10-06 10:47:23 +02001819 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1820 unsigned to = from + len;
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001821 struct inode *inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 unsigned block_start, block_end;
1823 sector_t block;
1824 int err = 0;
1825 unsigned blocksize, bbits;
1826 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1827
1828 BUG_ON(!PageLocked(page));
1829 BUG_ON(from > PAGE_CACHE_SIZE);
1830 BUG_ON(to > PAGE_CACHE_SIZE);
1831 BUG_ON(from > to);
1832
1833 blocksize = 1 << inode->i_blkbits;
1834 if (!page_has_buffers(page))
1835 create_empty_buffers(page, blocksize, 0);
1836 head = page_buffers(page);
1837
1838 bbits = inode->i_blkbits;
1839 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1840
1841 for(bh = head, block_start = 0; bh != head || !block_start;
1842 block++, block_start=block_end, bh = bh->b_this_page) {
1843 block_end = block_start + blocksize;
1844 if (block_end <= from || block_start >= to) {
1845 if (PageUptodate(page)) {
1846 if (!buffer_uptodate(bh))
1847 set_buffer_uptodate(bh);
1848 }
1849 continue;
1850 }
1851 if (buffer_new(bh))
1852 clear_buffer_new(bh);
1853 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001854 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 err = get_block(inode, block, bh, 1);
1856 if (err)
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001857 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 if (buffer_new(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 unmap_underlying_metadata(bh->b_bdev,
1860 bh->b_blocknr);
1861 if (PageUptodate(page)) {
Nick Piggin637aff42007-10-16 01:25:00 -07001862 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 set_buffer_uptodate(bh);
Nick Piggin637aff42007-10-16 01:25:00 -07001864 mark_buffer_dirty(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 continue;
1866 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001867 if (block_end > to || block_start < from)
1868 zero_user_segments(page,
1869 to, block_end,
1870 block_start, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 continue;
1872 }
1873 }
1874 if (PageUptodate(page)) {
1875 if (!buffer_uptodate(bh))
1876 set_buffer_uptodate(bh);
1877 continue;
1878 }
1879 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
David Chinner33a266d2007-02-12 00:51:41 -08001880 !buffer_unwritten(bh) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 (block_start < from || block_end > to)) {
1882 ll_rw_block(READ, 1, &bh);
1883 *wait_bh++=bh;
1884 }
1885 }
1886 /*
1887 * If we issued read requests - let them complete.
1888 */
1889 while(wait_bh > wait) {
1890 wait_on_buffer(*--wait_bh);
1891 if (!buffer_uptodate(*wait_bh))
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001892 err = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 }
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001894 if (unlikely(err)) {
Nick Pigginafddba42007-10-16 01:25:01 -07001895 page_zero_new_buffers(page, from, to);
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001896 ClearPageUptodate(page);
1897 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 return err;
1899}
Christoph Hellwigebdec242010-10-06 10:47:23 +02001900EXPORT_SYMBOL(__block_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901
1902static int __block_commit_write(struct inode *inode, struct page *page,
1903 unsigned from, unsigned to)
1904{
1905 unsigned block_start, block_end;
1906 int partial = 0;
1907 unsigned blocksize;
1908 struct buffer_head *bh, *head;
1909
1910 blocksize = 1 << inode->i_blkbits;
1911
1912 for(bh = head = page_buffers(page), block_start = 0;
1913 bh != head || !block_start;
1914 block_start=block_end, bh = bh->b_this_page) {
1915 block_end = block_start + blocksize;
1916 if (block_end <= from || block_start >= to) {
1917 if (!buffer_uptodate(bh))
1918 partial = 1;
1919 } else {
1920 set_buffer_uptodate(bh);
1921 mark_buffer_dirty(bh);
1922 }
Nick Pigginafddba42007-10-16 01:25:01 -07001923 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 }
1925
1926 /*
1927 * If this is a partial write which happened to make all buffers
1928 * uptodate then we can optimize away a bogus readpage() for
1929 * the next read(). Here we 'discover' whether the page went
1930 * uptodate as a result of this (potentially partial) write.
1931 */
1932 if (!partial)
1933 SetPageUptodate(page);
1934 return 0;
1935}
1936
1937/*
Christoph Hellwig155130a2010-06-04 11:29:58 +02001938 * block_write_begin takes care of the basic task of block allocation and
1939 * bringing partial write blocks uptodate first.
1940 *
npiggin@suse.de7bb46a62010-05-27 01:05:33 +10001941 * The filesystem needs to handle block truncation upon failure.
Nick Pigginafddba42007-10-16 01:25:01 -07001942 */
Christoph Hellwig155130a2010-06-04 11:29:58 +02001943int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1944 unsigned flags, struct page **pagep, get_block_t *get_block)
Nick Pigginafddba42007-10-16 01:25:01 -07001945{
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001946 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
Nick Pigginafddba42007-10-16 01:25:01 -07001947 struct page *page;
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001948 int status;
Nick Pigginafddba42007-10-16 01:25:01 -07001949
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001950 page = grab_cache_page_write_begin(mapping, index, flags);
1951 if (!page)
1952 return -ENOMEM;
Nick Pigginafddba42007-10-16 01:25:01 -07001953
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001954 status = __block_write_begin(page, pos, len, get_block);
Nick Pigginafddba42007-10-16 01:25:01 -07001955 if (unlikely(status)) {
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001956 unlock_page(page);
1957 page_cache_release(page);
1958 page = NULL;
Nick Pigginafddba42007-10-16 01:25:01 -07001959 }
1960
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001961 *pagep = page;
Nick Pigginafddba42007-10-16 01:25:01 -07001962 return status;
1963}
1964EXPORT_SYMBOL(block_write_begin);
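
/*
 * Editor's note: a sketch of a filesystem ->write_begin built on
 * block_write_begin(), not part of the original file.  "myfs_get_block"
 * is a hypothetical get_block_t callback; a real filesystem would also
 * truncate any blocks instantiated beyond i_size when this fails, as
 * the comment above requires.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 myfs_get_block);
}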
1965
1966int block_write_end(struct file *file, struct address_space *mapping,
1967 loff_t pos, unsigned len, unsigned copied,
1968 struct page *page, void *fsdata)
1969{
1970 struct inode *inode = mapping->host;
1971 unsigned start;
1972
1973 start = pos & (PAGE_CACHE_SIZE - 1);
1974
1975 if (unlikely(copied < len)) {
1976 /*
1977 * The buffers that were written will now be uptodate, so we
1978 * don't have to worry about a readpage reading them and
1979 * overwriting a partial write. However if we have encountered
1980 * a short write and only partially written into a buffer, it
1981 * will not be marked uptodate, so a readpage might come in and
1982 * destroy our partial write.
1983 *
1984 * Do the simplest thing, and just treat any short write to a
1985 * non uptodate page as a zero-length write, and force the
1986 * caller to redo the whole thing.
1987 */
1988 if (!PageUptodate(page))
1989 copied = 0;
1990
1991 page_zero_new_buffers(page, start+copied, start+len);
1992 }
1993 flush_dcache_page(page);
1994
1995 /* This could be a short (even 0-length) commit */
1996 __block_commit_write(inode, page, start, start+copied);
1997
1998 return copied;
1999}
2000EXPORT_SYMBOL(block_write_end);
2001
2002int generic_write_end(struct file *file, struct address_space *mapping,
2003 loff_t pos, unsigned len, unsigned copied,
2004 struct page *page, void *fsdata)
2005{
2006 struct inode *inode = mapping->host;
Jan Karac7d206b2008-07-11 19:27:31 -04002007 int i_size_changed = 0;
Nick Pigginafddba42007-10-16 01:25:01 -07002008
2009 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2010
2011 /*
2012 * No need to use i_size_read() here, the i_size
2013 * cannot change under us because we hold i_mutex.
2014 *
2015 * But it's important to update i_size while still holding page lock:
2016 * page writeout could otherwise come in and zero beyond i_size.
2017 */
2018 if (pos+copied > inode->i_size) {
2019 i_size_write(inode, pos+copied);
Jan Karac7d206b2008-07-11 19:27:31 -04002020 i_size_changed = 1;
Nick Pigginafddba42007-10-16 01:25:01 -07002021 }
2022
2023 unlock_page(page);
2024 page_cache_release(page);
2025
Jan Karac7d206b2008-07-11 19:27:31 -04002026 /*
2027 * Don't mark the inode dirty under page lock. First, it unnecessarily
2028 * makes the holding time of page lock longer. Second, it forces lock
2029 * ordering of page lock and transaction start for journaling
2030 * filesystems.
2031 */
2032 if (i_size_changed)
2033 mark_inode_dirty(inode);
2034
Nick Pigginafddba42007-10-16 01:25:01 -07002035 return copied;
2036}
2037EXPORT_SYMBOL(generic_write_end);
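
/*
 * Editor's note: illustrative wiring, not part of the original file.
 * Simple block-backed filesystems pair a block_write_begin() wrapper
 * (like the myfs_write_begin sketch above) with generic_write_end()
 * directly; the remaining entries are hypothetical and sketched
 * further below.
 */
static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,	/* sketched further below */
	.writepage	= myfs_writepage,	/* sketched further below */
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};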
2038
2039/*
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002040 * block_is_partially_uptodate checks whether buffers within a page are
2041 * uptodate or not.
2042 *
2043 * Returns true if all buffers which correspond to a file portion
2044 * we want to read are uptodate.
2045 */
2046int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2047 unsigned long from)
2048{
2049 struct inode *inode = page->mapping->host;
2050 unsigned block_start, block_end, blocksize;
2051 unsigned to;
2052 struct buffer_head *bh, *head;
2053 int ret = 1;
2054
2055 if (!page_has_buffers(page))
2056 return 0;
2057
2058 blocksize = 1 << inode->i_blkbits;
2059 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2060 to = from + to;
2061 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2062 return 0;
2063
2064 head = page_buffers(page);
2065 bh = head;
2066 block_start = 0;
2067 do {
2068 block_end = block_start + blocksize;
2069 if (block_end > from && block_start < to) {
2070 if (!buffer_uptodate(bh)) {
2071 ret = 0;
2072 break;
2073 }
2074 if (block_end >= to)
2075 break;
2076 }
2077 block_start = block_end;
2078 bh = bh->b_this_page;
2079 } while (bh != head);
2080
2081 return ret;
2082}
2083EXPORT_SYMBOL(block_is_partially_uptodate);
2084
2085/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 * Generic "read page" function for block devices that have the normal
2087 * get_block functionality. This is most of the block device filesystems.
2088 * Reads the page asynchronously --- the unlock_buffer() and
2089 * set/clear_buffer_uptodate() functions propagate buffer state into the
2090 * page struct once IO has completed.
2091 */
2092int block_read_full_page(struct page *page, get_block_t *get_block)
2093{
2094 struct inode *inode = page->mapping->host;
2095 sector_t iblock, lblock;
2096 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2097 unsigned int blocksize;
2098 int nr, i;
2099 int fully_mapped = 1;
2100
Matt Mackallcd7619d2005-05-01 08:59:01 -07002101 BUG_ON(!PageLocked(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 blocksize = 1 << inode->i_blkbits;
2103 if (!page_has_buffers(page))
2104 create_empty_buffers(page, blocksize, 0);
2105 head = page_buffers(page);
2106
2107 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2108 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2109 bh = head;
2110 nr = 0;
2111 i = 0;
2112
2113 do {
2114 if (buffer_uptodate(bh))
2115 continue;
2116
2117 if (!buffer_mapped(bh)) {
Andrew Mortonc64610b2005-05-16 21:53:49 -07002118 int err = 0;
2119
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 fully_mapped = 0;
2121 if (iblock < lblock) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002122 WARN_ON(bh->b_size != blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002123 err = get_block(inode, iblock, bh, 0);
2124 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 SetPageError(page);
2126 }
2127 if (!buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002128 zero_user(page, i * blocksize, blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002129 if (!err)
2130 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 continue;
2132 }
2133 /*
2134 * get_block() might have updated the buffer
2135 * synchronously
2136 */
2137 if (buffer_uptodate(bh))
2138 continue;
2139 }
2140 arr[nr++] = bh;
2141 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2142
2143 if (fully_mapped)
2144 SetPageMappedToDisk(page);
2145
2146 if (!nr) {
2147 /*
2148 * All buffers are uptodate - we can set the page uptodate
2149 * as well. But not if get_block() returned an error.
2150 */
2151 if (!PageError(page))
2152 SetPageUptodate(page);
2153 unlock_page(page);
2154 return 0;
2155 }
2156
2157 /* Stage two: lock the buffers */
2158 for (i = 0; i < nr; i++) {
2159 bh = arr[i];
2160 lock_buffer(bh);
2161 mark_buffer_async_read(bh);
2162 }
2163
2164 /*
2165 * Stage 3: start the IO. Check for uptodateness
2166 * inside the buffer lock in case another process reading
2167 * the underlying blockdev brought it uptodate (the sct fix).
2168 */
2169 for (i = 0; i < nr; i++) {
2170 bh = arr[i];
2171 if (buffer_uptodate(bh))
2172 end_buffer_async_read(bh, 1);
2173 else
2174 submit_bh(READ, bh);
2175 }
2176 return 0;
2177}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002178EXPORT_SYMBOL(block_read_full_page);
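
/*
 * Editor's note: a sketch, not part of the original file.  A typical
 * ->readpage for a block-backed filesystem is a one-line wrapper;
 * "myfs_get_block" is hypothetical.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}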
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179
2180/* utility function for filesystems that need to do work on expanding
Nick Piggin89e10782007-10-16 01:25:07 -07002181 * truncates. Uses filesystem pagecache writes to allow the filesystem to
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 * deal with the hole.
2183 */
Nick Piggin89e10782007-10-16 01:25:07 -07002184int generic_cont_expand_simple(struct inode *inode, loff_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185{
2186 struct address_space *mapping = inode->i_mapping;
2187 struct page *page;
Nick Piggin89e10782007-10-16 01:25:07 -07002188 void *fsdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 int err;
2190
npiggin@suse.dec08d3b02009-08-21 02:35:06 +10002191 err = inode_newsize_ok(inode, size);
2192 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 goto out;
2194
Nick Piggin89e10782007-10-16 01:25:07 -07002195 err = pagecache_write_begin(NULL, mapping, size, 0,
2196 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2197 &page, &fsdata);
2198 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 goto out;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002200
Nick Piggin89e10782007-10-16 01:25:07 -07002201 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2202 BUG_ON(err > 0);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002203
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204out:
2205 return err;
2206}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002207EXPORT_SYMBOL(generic_cont_expand_simple);
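
/*
 * Editor's note: a usage sketch, not part of the original file.  An
 * expanding truncate in a filesystem's ->setattr path might look like
 * this ("myfs_expand" is a hypothetical name):
 */
static int myfs_expand(struct inode *inode, loff_t newsize)
{
	int err = generic_cont_expand_simple(inode, newsize);

	if (err)
		return err;
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return 0;
}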
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208
Adrian Bunkf1e3af72008-04-29 00:59:01 -07002209static int cont_expand_zero(struct file *file, struct address_space *mapping,
2210 loff_t pos, loff_t *bytes)
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002211{
Nick Piggin89e10782007-10-16 01:25:07 -07002212 struct inode *inode = mapping->host;
2213 unsigned blocksize = 1 << inode->i_blkbits;
2214 struct page *page;
2215 void *fsdata;
2216 pgoff_t index, curidx;
2217 loff_t curpos;
2218 unsigned zerofrom, offset, len;
2219 int err = 0;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002220
Nick Piggin89e10782007-10-16 01:25:07 -07002221 index = pos >> PAGE_CACHE_SHIFT;
2222 offset = pos & ~PAGE_CACHE_MASK;
2223
2224 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2225 zerofrom = curpos & ~PAGE_CACHE_MASK;
2226 if (zerofrom & (blocksize-1)) {
2227 *bytes |= (blocksize-1);
2228 (*bytes)++;
2229 }
2230 len = PAGE_CACHE_SIZE - zerofrom;
2231
2232 err = pagecache_write_begin(file, mapping, curpos, len,
2233 AOP_FLAG_UNINTERRUPTIBLE,
2234 &page, &fsdata);
2235 if (err)
2236 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002237 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002238 err = pagecache_write_end(file, mapping, curpos, len, len,
2239 page, fsdata);
2240 if (err < 0)
2241 goto out;
2242 BUG_ON(err != len);
2243 err = 0;
OGAWA Hirofumi061e9742008-04-28 02:16:28 -07002244
2245 balance_dirty_pages_ratelimited(mapping);
Nick Piggin89e10782007-10-16 01:25:07 -07002246 }
2247
2248 /* page covers the boundary, find the boundary offset */
2249 if (index == curidx) {
2250 zerofrom = curpos & ~PAGE_CACHE_MASK;
2251 /* if we will expand the thing last block will be filled */
2252 if (offset <= zerofrom) {
2253 goto out;
2254 }
2255 if (zerofrom & (blocksize-1)) {
2256 *bytes |= (blocksize-1);
2257 (*bytes)++;
2258 }
2259 len = offset - zerofrom;
2260
2261 err = pagecache_write_begin(file, mapping, curpos, len,
2262 AOP_FLAG_UNINTERRUPTIBLE,
2263 &page, &fsdata);
2264 if (err)
2265 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002266 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002267 err = pagecache_write_end(file, mapping, curpos, len, len,
2268 page, fsdata);
2269 if (err < 0)
2270 goto out;
2271 BUG_ON(err != len);
2272 err = 0;
2273 }
2274out:
2275 return err;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002276}
2277
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278/*
 2279 * For moronic filesystems that do not allow holes in files.
2280 * We may have to extend the file.
2281 */
Christoph Hellwig282dc172010-06-04 11:29:55 +02002282int cont_write_begin(struct file *file, struct address_space *mapping,
Nick Piggin89e10782007-10-16 01:25:07 -07002283 loff_t pos, unsigned len, unsigned flags,
2284 struct page **pagep, void **fsdata,
2285 get_block_t *get_block, loff_t *bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 unsigned blocksize = 1 << inode->i_blkbits;
Nick Piggin89e10782007-10-16 01:25:07 -07002289 unsigned zerofrom;
2290 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291
Nick Piggin89e10782007-10-16 01:25:07 -07002292 err = cont_expand_zero(file, mapping, pos, bytes);
2293 if (err)
Christoph Hellwig155130a2010-06-04 11:29:58 +02002294 return err;
Nick Piggin89e10782007-10-16 01:25:07 -07002295
2296 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2297 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2298 *bytes |= (blocksize-1);
2299 (*bytes)++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 }
2301
Christoph Hellwig155130a2010-06-04 11:29:58 +02002302 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002304EXPORT_SYMBOL(cont_write_begin);
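
/*
 * Editor's note: a sketch modelled on FAT-style filesystems, not part
 * of the original file.  MYFS_I() and its mmu_private field are
 * hypothetical stand-ins for wherever the filesystem tracks the
 * high-water mark of zeroed-out data; myfs_get_block is hypothetical
 * too.
 */
static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}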
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306int block_commit_write(struct page *page, unsigned from, unsigned to)
2307{
2308 struct inode *inode = page->mapping->host;
2309 __block_commit_write(inode,page,from,to);
2310 return 0;
2311}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002312EXPORT_SYMBOL(block_commit_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
David Chinner54171692007-07-19 17:39:55 +10002314/*
2315 * block_page_mkwrite() is not allowed to change the file size as it gets
2316 * called from a page fault handler when a page is first dirtied. Hence we must
2317 * be careful to check for EOF conditions here. We set the page up correctly
2318 * for a written page which means we get ENOSPC checking when writing into
2319 * holes and correct delalloc and unwritten extent mapping on filesystems that
2320 * support these features.
2321 *
2322 * We are not allowed to take the i_mutex here so we have to play games to
2323 * protect against truncate races as the page could now be beyond EOF. Because
npiggin@suse.de7bb46a62010-05-27 01:05:33 +10002324 * truncate writes the inode size before removing pages, once we have the
David Chinner54171692007-07-19 17:39:55 +10002325 * page lock we can determine safely if the page is beyond EOF. If it is not
2326 * beyond EOF, then the page is guaranteed safe against truncation until we
2327 * unlock the page.
2328 */
2329int
Nick Pigginc2ec1752009-03-31 15:23:21 -07002330block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
David Chinner54171692007-07-19 17:39:55 +10002331 get_block_t get_block)
2332{
Nick Pigginc2ec1752009-03-31 15:23:21 -07002333 struct page *page = vmf->page;
David Chinner54171692007-07-19 17:39:55 +10002334 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2335 unsigned long end;
2336 loff_t size;
Nick Piggin56a76f82009-03-31 15:23:23 -07002337 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
David Chinner54171692007-07-19 17:39:55 +10002338
2339 lock_page(page);
2340 size = i_size_read(inode);
2341 if ((page->mapping != inode->i_mapping) ||
Nick Piggin18336332007-07-20 00:31:45 -07002342 (page_offset(page) > size)) {
David Chinner54171692007-07-19 17:39:55 +10002343 /* page got truncated out from underneath us */
Nick Pigginb827e492009-04-30 15:08:16 -07002344 unlock_page(page);
2345 goto out;
David Chinner54171692007-07-19 17:39:55 +10002346 }
2347
2348 /* page is wholly or partially inside EOF */
2349 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2350 end = size & ~PAGE_CACHE_MASK;
2351 else
2352 end = PAGE_CACHE_SIZE;
2353
Christoph Hellwigebdec242010-10-06 10:47:23 +02002354 ret = __block_write_begin(page, 0, end, get_block);
David Chinner54171692007-07-19 17:39:55 +10002355 if (!ret)
2356 ret = block_commit_write(page, 0, end);
2357
Nick Piggin56a76f82009-03-31 15:23:23 -07002358 if (unlikely(ret)) {
Nick Pigginb827e492009-04-30 15:08:16 -07002359 unlock_page(page);
Nick Piggin56a76f82009-03-31 15:23:23 -07002360 if (ret == -ENOMEM)
2361 ret = VM_FAULT_OOM;
2362 else /* -ENOSPC, -EIO, etc */
2363 ret = VM_FAULT_SIGBUS;
Nick Pigginb827e492009-04-30 15:08:16 -07002364 } else
2365 ret = VM_FAULT_LOCKED;
Nick Pigginc2ec1752009-03-31 15:23:21 -07002366
Nick Pigginb827e492009-04-30 15:08:16 -07002367out:
David Chinner54171692007-07-19 17:39:55 +10002368 return ret;
2369}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002370EXPORT_SYMBOL(block_page_mkwrite);
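
/*
 * Editor's note: illustrative wiring, not part of the original file.
 * A filesystem exposes block_page_mkwrite() through its
 * vm_operations_struct, usually alongside filemap_fault();
 * "myfs_get_block" is hypothetical.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};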
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371
2372/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002373 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 * immediately, while under the page lock. So it needs a special end_io
2375 * handler which does not touch the bh after unlocking it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 */
2377static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2378{
Dmitry Monakhov68671f32007-10-16 01:24:47 -07002379 __end_buffer_read_notouch(bh, uptodate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380}
2381
2382/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002383 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2384 * the page (converting it to circular linked list and taking care of page
2385 * dirty races).
2386 */
2387static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2388{
2389 struct buffer_head *bh;
2390
2391 BUG_ON(!PageLocked(page));
2392
2393 spin_lock(&page->mapping->private_lock);
2394 bh = head;
2395 do {
2396 if (PageDirty(page))
2397 set_buffer_dirty(bh);
2398 if (!bh->b_this_page)
2399 bh->b_this_page = head;
2400 bh = bh->b_this_page;
2401 } while (bh != head);
2402 attach_page_buffers(page, head);
2403 spin_unlock(&page->mapping->private_lock);
2404}
2405
2406/*
Christoph Hellwigea0f04e2010-06-04 11:29:54 +02002407 * On entry, the page is not uptodate at all.
 2408 * On exit, the page is fully uptodate in the areas outside (from,to).
npiggin@suse.de7bb46a62010-05-27 01:05:33 +10002409 * The filesystem needs to handle block truncation upon failure.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 */
Christoph Hellwigea0f04e2010-06-04 11:29:54 +02002411int nobh_write_begin(struct address_space *mapping,
Nick Piggin03158cd2007-10-16 01:25:25 -07002412 loff_t pos, unsigned len, unsigned flags,
2413 struct page **pagep, void **fsdata,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 get_block_t *get_block)
2415{
Nick Piggin03158cd2007-10-16 01:25:25 -07002416 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 const unsigned blkbits = inode->i_blkbits;
2418 const unsigned blocksize = 1 << blkbits;
Nick Piggina4b06722007-10-16 01:24:48 -07002419 struct buffer_head *head, *bh;
Nick Piggin03158cd2007-10-16 01:25:25 -07002420 struct page *page;
2421 pgoff_t index;
2422 unsigned from, to;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423 unsigned block_in_page;
Nick Piggina4b06722007-10-16 01:24:48 -07002424 unsigned block_start, block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 sector_t block_in_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 int nr_reads = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 int ret = 0;
2428 int is_mapped_to_disk = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429
Nick Piggin03158cd2007-10-16 01:25:25 -07002430 index = pos >> PAGE_CACHE_SHIFT;
2431 from = pos & (PAGE_CACHE_SIZE - 1);
2432 to = from + len;
2433
Nick Piggin54566b22009-01-04 12:00:53 -08002434 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Piggin03158cd2007-10-16 01:25:25 -07002435 if (!page)
2436 return -ENOMEM;
2437 *pagep = page;
2438 *fsdata = NULL;
2439
2440 if (page_has_buffers(page)) {
Namhyung Kim309f77a2010-10-25 15:01:12 +09002441 ret = __block_write_begin(page, pos, len, get_block);
2442 if (unlikely(ret))
2443 goto out_release;
2444 return ret;
Nick Piggin03158cd2007-10-16 01:25:25 -07002445 }
Nick Piggina4b06722007-10-16 01:24:48 -07002446
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 if (PageMappedToDisk(page))
2448 return 0;
2449
Nick Piggina4b06722007-10-16 01:24:48 -07002450 /*
2451 * Allocate buffers so that we can keep track of state, and potentially
2452 * attach them to the page if an error occurs. In the common case of
2453 * no error, they will just be freed again without ever being attached
2454 * to the page (which is all OK, because we're under the page lock).
2455 *
2456 * Be careful: the buffer linked list is a NULL terminated one, rather
2457 * than the circular one we're used to.
2458 */
2459 head = alloc_page_buffers(page, blocksize, 0);
Nick Piggin03158cd2007-10-16 01:25:25 -07002460 if (!head) {
2461 ret = -ENOMEM;
2462 goto out_release;
2463 }
Nick Piggina4b06722007-10-16 01:24:48 -07002464
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
2467 /*
2468 * We loop across all blocks in the page, whether or not they are
2469 * part of the affected region. This is so we can discover if the
2470 * page is fully mapped-to-disk.
2471 */
Nick Piggina4b06722007-10-16 01:24:48 -07002472 for (block_start = 0, block_in_page = 0, bh = head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 block_start < PAGE_CACHE_SIZE;
Nick Piggina4b06722007-10-16 01:24:48 -07002474 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 int create;
2476
Nick Piggina4b06722007-10-16 01:24:48 -07002477 block_end = block_start + blocksize;
2478 bh->b_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 create = 1;
2480 if (block_start >= to)
2481 create = 0;
2482 ret = get_block(inode, block_in_file + block_in_page,
Nick Piggina4b06722007-10-16 01:24:48 -07002483 bh, create);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 if (ret)
2485 goto failed;
Nick Piggina4b06722007-10-16 01:24:48 -07002486 if (!buffer_mapped(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 is_mapped_to_disk = 0;
Nick Piggina4b06722007-10-16 01:24:48 -07002488 if (buffer_new(bh))
2489 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2490 if (PageUptodate(page)) {
2491 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 continue;
Nick Piggina4b06722007-10-16 01:24:48 -07002493 }
2494 if (buffer_new(bh) || !buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002495 zero_user_segments(page, block_start, from,
2496 to, block_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 continue;
2498 }
Nick Piggina4b06722007-10-16 01:24:48 -07002499 if (buffer_uptodate(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 continue; /* reiserfs does this */
2501 if (block_start < from || block_end > to) {
Nick Piggina4b06722007-10-16 01:24:48 -07002502 lock_buffer(bh);
2503 bh->b_end_io = end_buffer_read_nobh;
2504 submit_bh(READ, bh);
2505 nr_reads++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506 }
2507 }
2508
2509 if (nr_reads) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 /*
2511 * The page is locked, so these buffers are protected from
2512 * any VM or truncate activity. Hence we don't need to care
2513 * for the buffer_head refcounts.
2514 */
Nick Piggina4b06722007-10-16 01:24:48 -07002515 for (bh = head; bh; bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 wait_on_buffer(bh);
2517 if (!buffer_uptodate(bh))
2518 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 }
2520 if (ret)
2521 goto failed;
2522 }
2523
2524 if (is_mapped_to_disk)
2525 SetPageMappedToDisk(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526
Nick Piggin03158cd2007-10-16 01:25:25 -07002527 *fsdata = head; /* to be released by nobh_write_end */
Nick Piggina4b06722007-10-16 01:24:48 -07002528
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529 return 0;
2530
2531failed:
Nick Piggin03158cd2007-10-16 01:25:25 -07002532 BUG_ON(!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 /*
Nick Piggina4b06722007-10-16 01:24:48 -07002534 * Error recovery is a bit difficult. We need to zero out blocks that
2535 * were newly allocated, and dirty them to ensure they get written out.
2536 * Buffers need to be attached to the page at this point, otherwise
2537 * the handling of potential IO errors during writeout would be hard
2538 * (could try doing synchronous writeout, but what if that fails too?)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002540 attach_nobh_buffers(page, head);
2541 page_zero_new_buffers(page, from, to);
Nick Piggina4b06722007-10-16 01:24:48 -07002542
Nick Piggin03158cd2007-10-16 01:25:25 -07002543out_release:
2544 unlock_page(page);
2545 page_cache_release(page);
2546 *pagep = NULL;
Nick Piggina4b06722007-10-16 01:24:48 -07002547
npiggin@suse.de7bb46a62010-05-27 01:05:33 +10002548 return ret;
2549}
Nick Piggin03158cd2007-10-16 01:25:25 -07002550EXPORT_SYMBOL(nobh_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551
Nick Piggin03158cd2007-10-16 01:25:25 -07002552int nobh_write_end(struct file *file, struct address_space *mapping,
2553 loff_t pos, unsigned len, unsigned copied,
2554 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555{
2556 struct inode *inode = page->mapping->host;
Nick Pigginefdc3132007-10-21 06:57:41 +02002557 struct buffer_head *head = fsdata;
Nick Piggin03158cd2007-10-16 01:25:25 -07002558 struct buffer_head *bh;
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002559 BUG_ON(fsdata != NULL && page_has_buffers(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560
Dave Kleikampd4cf1092009-02-06 14:59:26 -06002561 if (unlikely(copied < len) && head)
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002562 attach_nobh_buffers(page, head);
2563 if (page_has_buffers(page))
2564 return generic_write_end(file, mapping, pos, len,
2565 copied, page, fsdata);
Nick Piggina4b06722007-10-16 01:24:48 -07002566
Nick Piggin22c8ca72007-02-20 13:58:09 -08002567 SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 set_page_dirty(page);
Nick Piggin03158cd2007-10-16 01:25:25 -07002569 if (pos+copied > inode->i_size) {
2570 i_size_write(inode, pos+copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 mark_inode_dirty(inode);
2572 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002573
2574 unlock_page(page);
2575 page_cache_release(page);
2576
Nick Piggin03158cd2007-10-16 01:25:25 -07002577 while (head) {
2578 bh = head;
2579 head = head->b_this_page;
2580 free_buffer_head(bh);
2581 }
2582
2583 return copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584}
Nick Piggin03158cd2007-10-16 01:25:25 -07002585EXPORT_SYMBOL(nobh_write_end);
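
/*
 * Editor's note: illustrative wiring, not part of the original file.
 * A filesystem opting into the nobh path (as ext2 does with its "nobh"
 * mount option) pairs these helpers; myfs_nobh_write_begin and
 * myfs_nobh_writepage stand for hypothetical one-line wrappers around
 * nobh_write_begin() and nobh_writepage() with the filesystem's
 * get_block callback.
 */
static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,	/* from the earlier sketch */
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
};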
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586
2587/*
 2588 * nobh_writepage() - based on block_write_full_page() except
2589 * that it tries to operate without attaching bufferheads to
2590 * the page.
2591 */
2592int nobh_writepage(struct page *page, get_block_t *get_block,
2593 struct writeback_control *wbc)
2594{
2595 struct inode * const inode = page->mapping->host;
2596 loff_t i_size = i_size_read(inode);
2597 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2598 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 int ret;
2600
2601 /* Is the page fully inside i_size? */
2602 if (page->index < end_index)
2603 goto out;
2604
2605 /* Is the page fully outside i_size? (truncate in progress) */
2606 offset = i_size & (PAGE_CACHE_SIZE-1);
2607 if (page->index >= end_index+1 || !offset) {
2608 /*
2609 * The page may have dirty, unmapped buffers. For example,
2610 * they may have been added in ext3_writepage(). Make them
2611 * freeable here, so the page does not leak.
2612 */
2613#if 0
2614 /* Not really sure about this - do we need this ? */
2615 if (page->mapping->a_ops->invalidatepage)
2616 page->mapping->a_ops->invalidatepage(page, offset);
2617#endif
2618 unlock_page(page);
2619 return 0; /* don't care */
2620 }
2621
2622 /*
2623 * The page straddles i_size. It must be zeroed out on each and every
2624 * writepage invocation because it may be mmapped. "A file is mapped
2625 * in multiples of the page size. For a file that is not a multiple of
2626 * the page size, the remaining memory is zeroed when mapped, and
2627 * writes to that region are not written out to the file."
2628 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002629 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630out:
2631 ret = mpage_writepage(page, get_block, wbc);
2632 if (ret == -EAGAIN)
Chris Mason35c80d52009-04-15 13:22:38 -04002633 ret = __block_write_full_page(inode, page, get_block, wbc,
2634 end_buffer_async_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635 return ret;
2636}
2637EXPORT_SYMBOL(nobh_writepage);
2638
Nick Piggin03158cd2007-10-16 01:25:25 -07002639int nobh_truncate_page(struct address_space *mapping,
2640 loff_t from, get_block_t *get_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2643 unsigned offset = from & (PAGE_CACHE_SIZE-1);
Nick Piggin03158cd2007-10-16 01:25:25 -07002644 unsigned blocksize;
2645 sector_t iblock;
2646 unsigned length, pos;
2647 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 struct page *page;
Nick Piggin03158cd2007-10-16 01:25:25 -07002649 struct buffer_head map_bh;
2650 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651
Nick Piggin03158cd2007-10-16 01:25:25 -07002652 blocksize = 1 << inode->i_blkbits;
2653 length = offset & (blocksize - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654
Nick Piggin03158cd2007-10-16 01:25:25 -07002655 /* Block boundary? Nothing to do */
2656 if (!length)
2657 return 0;
2658
2659 length = blocksize - length;
2660 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2661
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 page = grab_cache_page(mapping, index);
Nick Piggin03158cd2007-10-16 01:25:25 -07002663 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664 if (!page)
2665 goto out;
2666
Nick Piggin03158cd2007-10-16 01:25:25 -07002667 if (page_has_buffers(page)) {
2668has_buffers:
2669 unlock_page(page);
2670 page_cache_release(page);
2671 return block_truncate_page(mapping, from, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002673
2674 /* Find the buffer that contains "offset" */
2675 pos = blocksize;
2676 while (offset >= pos) {
2677 iblock++;
2678 pos += blocksize;
2679 }
2680
Theodore Ts'o460bcf52009-05-12 07:37:56 -04002681 map_bh.b_size = blocksize;
2682 map_bh.b_state = 0;
Nick Piggin03158cd2007-10-16 01:25:25 -07002683 err = get_block(inode, iblock, &map_bh, 0);
2684 if (err)
2685 goto unlock;
2686 /* unmapped? It's a hole - nothing to do */
2687 if (!buffer_mapped(&map_bh))
2688 goto unlock;
2689
2690 /* Ok, it's mapped. Make sure it's up-to-date */
2691 if (!PageUptodate(page)) {
2692 err = mapping->a_ops->readpage(NULL, page);
2693 if (err) {
2694 page_cache_release(page);
2695 goto out;
2696 }
2697 lock_page(page);
2698 if (!PageUptodate(page)) {
2699 err = -EIO;
2700 goto unlock;
2701 }
2702 if (page_has_buffers(page))
2703 goto has_buffers;
2704 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002705 zero_user(page, offset, length);
Nick Piggin03158cd2007-10-16 01:25:25 -07002706 set_page_dirty(page);
2707 err = 0;
2708
2709unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 unlock_page(page);
2711 page_cache_release(page);
2712out:
Nick Piggin03158cd2007-10-16 01:25:25 -07002713 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714}
2715EXPORT_SYMBOL(nobh_truncate_page);
2716
2717int block_truncate_page(struct address_space *mapping,
2718 loff_t from, get_block_t *get_block)
2719{
2720 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2721 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2722 unsigned blocksize;
Andrew Morton54b21a72006-01-08 01:03:05 -08002723 sector_t iblock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 unsigned length, pos;
2725 struct inode *inode = mapping->host;
2726 struct page *page;
2727 struct buffer_head *bh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 int err;
2729
2730 blocksize = 1 << inode->i_blkbits;
2731 length = offset & (blocksize - 1);
2732
2733 /* Block boundary? Nothing to do */
2734 if (!length)
2735 return 0;
2736
2737 length = blocksize - length;
Andrew Morton54b21a72006-01-08 01:03:05 -08002738 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739
2740 page = grab_cache_page(mapping, index);
2741 err = -ENOMEM;
2742 if (!page)
2743 goto out;
2744
2745 if (!page_has_buffers(page))
2746 create_empty_buffers(page, blocksize, 0);
2747
2748 /* Find the buffer that contains "offset" */
2749 bh = page_buffers(page);
2750 pos = blocksize;
2751 while (offset >= pos) {
2752 bh = bh->b_this_page;
2753 iblock++;
2754 pos += blocksize;
2755 }
2756
2757 err = 0;
2758 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002759 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 err = get_block(inode, iblock, bh, 0);
2761 if (err)
2762 goto unlock;
2763 /* unmapped? It's a hole - nothing to do */
2764 if (!buffer_mapped(bh))
2765 goto unlock;
2766 }
2767
2768 /* Ok, it's mapped. Make sure it's up-to-date */
2769 if (PageUptodate(page))
2770 set_buffer_uptodate(bh);
2771
David Chinner33a266d2007-02-12 00:51:41 -08002772 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 err = -EIO;
2774 ll_rw_block(READ, 1, &bh);
2775 wait_on_buffer(bh);
2776 /* Uhhuh. Read error. Complain and punt. */
2777 if (!buffer_uptodate(bh))
2778 goto unlock;
2779 }
2780
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002781 zero_user(page, offset, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 mark_buffer_dirty(bh);
2783 err = 0;
2784
2785unlock:
2786 unlock_page(page);
2787 page_cache_release(page);
2788out:
2789 return err;
2790}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002791EXPORT_SYMBOL(block_truncate_page);
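
/*
 * Editor's note: a usage sketch, not part of the original file.  On a
 * shrinking truncate the filesystem zeroes the tail of the new last
 * block before freeing everything beyond it; names are hypothetical
 * and error handling is abbreviated.
 */
static int myfs_shrink(struct inode *inode, loff_t newsize)
{
	int err;

	err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
	if (err)
		return err;
	truncate_setsize(inode, newsize);
	/* ... now release the on-disk blocks past newsize ... */
	return 0;
}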
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792
2793/*
2794 * The generic ->writepage function for buffer-backed address_spaces
Chris Mason35c80d52009-04-15 13:22:38 -04002795 * this form passes in the end_io handler used to finish the IO.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 */
Chris Mason35c80d52009-04-15 13:22:38 -04002797int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2798 struct writeback_control *wbc, bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799{
2800 struct inode * const inode = page->mapping->host;
2801 loff_t i_size = i_size_read(inode);
2802 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2803 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
2805 /* Is the page fully inside i_size? */
2806 if (page->index < end_index)
Chris Mason35c80d52009-04-15 13:22:38 -04002807 return __block_write_full_page(inode, page, get_block, wbc,
2808 handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809
2810 /* Is the page fully outside i_size? (truncate in progress) */
2811 offset = i_size & (PAGE_CACHE_SIZE-1);
2812 if (page->index >= end_index+1 || !offset) {
2813 /*
2814 * The page may have dirty, unmapped buffers. For example,
2815 * they may have been added in ext3_writepage(). Make them
2816 * freeable here, so the page does not leak.
2817 */
Jan Karaaaa40592005-10-30 15:00:16 -08002818 do_invalidatepage(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 unlock_page(page);
2820 return 0; /* don't care */
2821 }
2822
2823 /*
2824 * The page straddles i_size. It must be zeroed out on each and every
Adam Buchbinder2a61aa42009-12-11 16:35:40 -05002825 * writepage invocation because it may be mmapped. "A file is mapped
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 * in multiples of the page size. For a file that is not a multiple of
2827 * the page size, the remaining memory is zeroed when mapped, and
2828 * writes to that region are not written out to the file."
2829 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002830 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Chris Mason35c80d52009-04-15 13:22:38 -04002831 return __block_write_full_page(inode, page, get_block, wbc, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002833EXPORT_SYMBOL(block_write_full_page_endio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834
Chris Mason35c80d52009-04-15 13:22:38 -04002835/*
2836 * The generic ->writepage function for buffer-backed address_spaces
2837 */
2838int block_write_full_page(struct page *page, get_block_t *get_block,
2839 struct writeback_control *wbc)
2840{
2841 return block_write_full_page_endio(page, get_block, wbc,
2842 end_buffer_async_write);
2843}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002844EXPORT_SYMBOL(block_write_full_page);
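
/*
 * Editor's note: a sketch, not part of the original file.  The matching
 * ->writepage wrapper for a block-backed filesystem is equally thin;
 * "myfs_get_block" is hypothetical.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}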
Chris Mason35c80d52009-04-15 13:22:38 -04002845
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2847 get_block_t *get_block)
2848{
2849 struct buffer_head tmp;
2850 struct inode *inode = mapping->host;
2851 tmp.b_state = 0;
2852 tmp.b_blocknr = 0;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002853 tmp.b_size = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 get_block(inode, block, &tmp, 0);
2855 return tmp.b_blocknr;
2856}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002857EXPORT_SYMBOL(generic_block_bmap);
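/*
 * Illustrative sketch, added for exposition: ->bmap is normally a thin
 * wrapper around generic_block_bmap().  "myfs_get_block" is a
 * hypothetical filesystem helper.
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */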
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858
NeilBrown6712ecf2007-09-27 12:47:43 +02002859static void end_bio_bh_io_sync(struct bio *bio, int err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860{
2861 struct buffer_head *bh = bio->bi_private;
2862
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 if (err == -EOPNOTSUPP) {
2864 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 }
2866
Keith Mannthey08bafc02008-11-25 10:24:35 +01002867 if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2868 set_bit(BH_Quiet, &bh->b_state);
2869
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2871 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872}
2873
2874int submit_bh(int rw, struct buffer_head * bh)
2875{
2876 struct bio *bio;
2877 int ret = 0;
2878
2879 BUG_ON(!buffer_locked(bh));
2880 BUG_ON(!buffer_mapped(bh));
2881 BUG_ON(!bh->b_end_io);
Aneesh Kumar K.V8fb0e342009-05-12 16:22:37 -04002882 BUG_ON(buffer_delay(bh));
2883 BUG_ON(buffer_unwritten(bh));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884
Jens Axboe48fd4f92008-08-22 10:00:36 +02002885 /*
Jens Axboe48fd4f92008-08-22 10:00:36 +02002886 * Only clear out a write error when rewriting
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 */
Jens Axboe48fd4f92008-08-22 10:00:36 +02002888 if (test_set_buffer_req(bh) && (rw & WRITE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 clear_buffer_write_io_error(bh);
2890
2891 /*
2892 * From here on down it's all bio -- do the initial mapping;
2893 * submit_bio -> generic_make_request may remap this bio further.
2894 */
2895 bio = bio_alloc(GFP_NOIO, 1);
2896
2897 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2898 bio->bi_bdev = bh->b_bdev;
2899 bio->bi_io_vec[0].bv_page = bh->b_page;
2900 bio->bi_io_vec[0].bv_len = bh->b_size;
2901 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2902
2903 bio->bi_vcnt = 1;
2904 bio->bi_idx = 0;
2905 bio->bi_size = bh->b_size;
2906
2907 bio->bi_end_io = end_bio_bh_io_sync;
2908 bio->bi_private = bh;
2909
2910 bio_get(bio);
2911 submit_bio(rw, bio);
2912
2913 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2914 ret = -EOPNOTSUPP;
2915
2916 bio_put(bio);
2917 return ret;
2918}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002919EXPORT_SYMBOL(submit_bh);
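/*
 * Illustrative sketch, added for exposition: a minimal synchronous
 * read built on submit_bh().  The buffer must already be mapped; the
 * caller takes the lock and a reference, matching the BUG_ON() checks
 * above, and end_buffer_read_sync() drops both on completion.
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 *
 * This is essentially what bh_submit_read() at the bottom of this
 * file does.
 */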
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920
2921/**
2922 * ll_rw_block: low-level access to block devices (DEPRECATED)
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002923 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 * @nr: number of &struct buffer_heads in the array
2925 * @bhs: array of pointers to &struct buffer_head
2926 *
Jan Karaa7662232005-09-06 15:19:10 -07002927 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2928 * requests an I/O operation on them, either a %READ or a %WRITE. The third
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002929 * %READA option is described in the documentation for generic_make_request()
2930 * which ll_rw_block() calls.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 *
2932 * This function drops any buffer that it cannot get a lock on (with the
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002933 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2934 * request, and any buffer that appears to be up-to-date when doing a read
2935 * request. Further, it marks as clean the buffers that are processed for
2936 * writing (the buffer cache won't assume that they are actually clean
2937 * until the buffer gets unlocked).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 *
2939 * ll_rw_block sets b_end_io to a simple completion handler that marks
2940 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2941 * any waiters.
2942 *
2943 * All of the buffers must be for the same device, and their size must
2944 * be a multiple of the current approved size for the device.
2945 */
2946void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2947{
2948 int i;
2949
2950 for (i = 0; i < nr; i++) {
2951 struct buffer_head *bh = bhs[i];
2952
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002953 if (!trylock_buffer(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954 continue;
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002955 if (rw == WRITE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956 if (test_clear_buffer_dirty(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07002957 bh->b_end_io = end_buffer_write_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08002958 get_bh(bh);
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002959 submit_bh(WRITE, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 continue;
2961 }
2962 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 if (!buffer_uptodate(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07002964 bh->b_end_io = end_buffer_read_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08002965 get_bh(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 submit_bh(rw, bh);
2967 continue;
2968 }
2969 }
2970 unlock_buffer(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 }
2972}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002973EXPORT_SYMBOL(ll_rw_block);
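/*
 * Illustrative sketch, added for exposition: the classic pattern for
 * batching metadata reads through ll_rw_block() and then waiting.
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}
 *
 * Because ll_rw_block() silently skips buffers it cannot lock, callers
 * that need data-integrity guarantees for writes should use
 * sync_dirty_buffer() instead.
 */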
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974
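/*
 * Start I/O on a dirty buffer without waiting for completion.  If the
 * buffer turns out to be clean, the lock is simply dropped; otherwise
 * the dirty bit is cleared and the write is submitted asynchronously
 * (end_buffer_write_sync() unlocks the buffer and releases the
 * reference taken here when the I/O finishes).
 */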
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002975void write_dirty_buffer(struct buffer_head *bh, int rw)
2976{
2977 lock_buffer(bh);
2978 if (!test_clear_buffer_dirty(bh)) {
2979 unlock_buffer(bh);
2980 return;
2981 }
2982 bh->b_end_io = end_buffer_write_sync;
2983 get_bh(bh);
2984 submit_bh(rw, bh);
2985}
2986EXPORT_SYMBOL(write_dirty_buffer);
2987
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988/*
2989 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2990 * and then start new I/O and then wait upon it. The caller must have a ref on
2991 * the buffer_head.
2992 */
Christoph Hellwig87e99512010-08-11 17:05:45 +02002993int __sync_dirty_buffer(struct buffer_head *bh, int rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994{
2995 int ret = 0;
2996
2997 WARN_ON(atomic_read(&bh->b_count) < 1);
2998 lock_buffer(bh);
2999 if (test_clear_buffer_dirty(bh)) {
3000 get_bh(bh);
3001 bh->b_end_io = end_buffer_write_sync;
Christoph Hellwig87e99512010-08-11 17:05:45 +02003002 ret = submit_bh(rw, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 wait_on_buffer(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 if (!ret && !buffer_uptodate(bh))
3005 ret = -EIO;
3006 } else {
3007 unlock_buffer(bh);
3008 }
3009 return ret;
3010}
Christoph Hellwig87e99512010-08-11 17:05:45 +02003011EXPORT_SYMBOL(__sync_dirty_buffer);
3012
3013int sync_dirty_buffer(struct buffer_head *bh)
3014{
3015 return __sync_dirty_buffer(bh, WRITE_SYNC);
3016}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07003017EXPORT_SYMBOL(sync_dirty_buffer);
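/*
 * Illustrative sketch, added for exposition: the data-integrity write
 * pattern, e.g. for a filesystem committing an updated superblock
 * buffer it holds a reference on.  After the filesystem-specific
 * modification of bh->b_data:
 *
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		return err;
 */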
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018
3019/*
3020 * try_to_free_buffers() checks if all the buffers on this particular page
3021 * are unused, and releases them if so.
3022 *
3023 * Exclusion against try_to_free_buffers may be obtained by either
3024 * locking the page or by holding its mapping's private_lock.
3025 *
3026 * If the page is dirty but all the buffers are clean then we need to
3027 * be sure to mark the page clean as well. This is because the page
3028 * may be against a block device, and a later reattachment of buffers
3029 * to a dirty page will set *all* buffers dirty. Which would corrupt
3030 * filesystem data on the same device.
3031 *
3032 * The same applies to regular filesystem pages: if all the buffers are
3033 * clean then we set the page clean and proceed. To do that, we require
3034 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3035 * private_lock.
3036 *
3037 * try_to_free_buffers() is non-blocking.
3038 */
3039static inline int buffer_busy(struct buffer_head *bh)
3040{
3041 return atomic_read(&bh->b_count) |
3042 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3043}
3044
3045static int
3046drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3047{
3048 struct buffer_head *head = page_buffers(page);
3049 struct buffer_head *bh;
3050
3051 bh = head;
3052 do {
akpm@osdl.orgde7d5a32005-05-01 08:58:39 -07003053 if (buffer_write_io_error(bh) && page->mapping)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 set_bit(AS_EIO, &page->mapping->flags);
3055 if (buffer_busy(bh))
3056 goto failed;
3057 bh = bh->b_this_page;
3058 } while (bh != head);
3059
3060 do {
3061 struct buffer_head *next = bh->b_this_page;
3062
Jan Kara535ee2f2008-02-08 04:21:59 -08003063 if (bh->b_assoc_map)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 __remove_assoc_queue(bh);
3065 bh = next;
3066 } while (bh != head);
3067 *buffers_to_free = head;
3068 __clear_page_buffers(page);
3069 return 1;
3070failed:
3071 return 0;
3072}
3073
3074int try_to_free_buffers(struct page *page)
3075{
3076 struct address_space * const mapping = page->mapping;
3077 struct buffer_head *buffers_to_free = NULL;
3078 int ret = 0;
3079
3080 BUG_ON(!PageLocked(page));
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003081 if (PageWriteback(page))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082 return 0;
3083
3084 if (mapping == NULL) { /* can this still happen? */
3085 ret = drop_buffers(page, &buffers_to_free);
3086 goto out;
3087 }
3088
3089 spin_lock(&mapping->private_lock);
3090 ret = drop_buffers(page, &buffers_to_free);
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003091
3092 /*
3093 * If the filesystem writes its buffers by hand (eg ext3)
3094 * then we can have clean buffers against a dirty page. We
3095 * clean the page here; otherwise the VM will never notice
3096 * that the filesystem did any IO at all.
3097 *
3098 * Also, during truncate, discard_buffer will have marked all
3099 * the page's buffers clean. We discover that here and clean
3100 * the page also.
Nick Piggin87df7242007-01-30 14:36:27 +11003101 *
3102 * private_lock must be held over this entire operation in order
3103 * to synchronise against __set_page_dirty_buffers and prevent the
3104 * dirty bit from being lost.
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003105 */
3106 if (ret)
3107 cancel_dirty_page(page, PAGE_CACHE_SIZE);
Nick Piggin87df7242007-01-30 14:36:27 +11003108 spin_unlock(&mapping->private_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109out:
3110 if (buffers_to_free) {
3111 struct buffer_head *bh = buffers_to_free;
3112
3113 do {
3114 struct buffer_head *next = bh->b_this_page;
3115 free_buffer_head(bh);
3116 bh = next;
3117 } while (bh != buffers_to_free);
3118 }
3119 return ret;
3120}
3121EXPORT_SYMBOL(try_to_free_buffers);
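/*
 * Illustrative sketch, added for exposition: a simple buffer-backed
 * filesystem can point its ->releasepage straight at
 * try_to_free_buffers().  "myfs_releasepage" is hypothetical;
 * journalling filesystems must first drop their own references to the
 * buffers.
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */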
3122
Linus Torvalds1da177e2005-04-16 15:20:36 -07003123/*
3124 * There are no bdflush tunables left. But distributions are
3125 * still running obsolete flush daemons, so we terminate them here.
3126 *
3127 * Use of bdflush() is deprecated and will be removed in a future kernel.
Jens Axboe5b0830c2009-09-23 19:37:09 +02003128 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 */
Heiko Carstensbdc480e2009-01-14 14:14:12 +01003130SYSCALL_DEFINE2(bdflush, int, func, long, data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131{
3132 static int msg_count;
3133
3134 if (!capable(CAP_SYS_ADMIN))
3135 return -EPERM;
3136
3137 if (msg_count < 5) {
3138 msg_count++;
3139 printk(KERN_INFO
3140 "warning: process `%s' used the obsolete bdflush"
3141 " system call\n", current->comm);
3142 printk(KERN_INFO "Fix your initscripts?\n");
3143 }
3144
3145 if (func == 1)
3146 do_exit(0);
3147 return 0;
3148}
3149
3150/*
3151 * Buffer-head allocation
3152 */
Christoph Lametere18b8902006-12-06 20:33:20 -08003153static struct kmem_cache *bh_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154
3155/*
3156 * Once the number of bh's in the machine exceeds this level, we start
3157 * stripping them in writeback.
3158 */
3159static int max_buffer_heads;
3160
3161int buffer_heads_over_limit;
3162
3163struct bh_accounting {
3164 int nr; /* Number of live bh's */
3165 int ratelimit; /* Limit cacheline bouncing */
3166};
3167
3168static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3169
3170static void recalc_bh_state(void)
3171{
3172 int i;
3173 int tot = 0;
3174
Christoph Lameteree1be862010-12-06 11:40:05 -06003175 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176 return;
Christoph Lameterc7b92512010-12-06 11:16:28 -06003177 __this_cpu_write(bh_accounting.ratelimit, 0);
Eric Dumazet8a143422006-03-24 03:18:10 -08003178 for_each_online_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 tot += per_cpu(bh_accounting, i).nr;
3180 buffer_heads_over_limit = (tot > max_buffer_heads);
3181}
Christoph Lameterc7b92512010-12-06 11:16:28 -06003182
Al Virodd0fc662005-10-07 07:46:04 +01003183struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184{
Richard Kennedy019b4d12010-03-10 15:20:33 -08003185 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186 if (ret) {
Christoph Lametera35afb82007-05-16 22:10:57 -07003187 INIT_LIST_HEAD(&ret->b_assoc_buffers);
Christoph Lameterc7b92512010-12-06 11:16:28 -06003188 preempt_disable();
3189 __this_cpu_inc(bh_accounting.nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 recalc_bh_state();
Christoph Lameterc7b92512010-12-06 11:16:28 -06003191 preempt_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192 }
3193 return ret;
3194}
3195EXPORT_SYMBOL(alloc_buffer_head);
3196
3197void free_buffer_head(struct buffer_head *bh)
3198{
3199 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3200 kmem_cache_free(bh_cachep, bh);
Christoph Lameterc7b92512010-12-06 11:16:28 -06003201 preempt_disable();
3202 __this_cpu_dec(bh_accounting.nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 recalc_bh_state();
Christoph Lameterc7b92512010-12-06 11:16:28 -06003204 preempt_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205}
3206EXPORT_SYMBOL(free_buffer_head);
3207
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208static void buffer_exit_cpu(int cpu)
3209{
3210 int i;
3211 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3212
3213 for (i = 0; i < BH_LRU_SIZE; i++) {
3214 brelse(b->bhs[i]);
3215 b->bhs[i] = NULL;
3216 }
Christoph Lameterc7b92512010-12-06 11:16:28 -06003217 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
Eric Dumazet8a143422006-03-24 03:18:10 -08003218 per_cpu(bh_accounting, cpu).nr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219}
3220
3221static int buffer_cpu_notify(struct notifier_block *self,
3222 unsigned long action, void *hcpu)
3223{
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003224 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 buffer_exit_cpu((unsigned long)hcpu);
3226 return NOTIFY_OK;
3227}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003229/**
Randy Dunlapa6b91912008-03-19 17:01:00 -07003230 * bh_uptodate_or_lock - Test whether the buffer is uptodate
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003231 * @bh: struct buffer_head
3232 *
3233 * Returns 1 if the buffer is up-to-date; otherwise returns 0
3234 * with the buffer locked.
3235 */
3236int bh_uptodate_or_lock(struct buffer_head *bh)
3237{
3238 if (!buffer_uptodate(bh)) {
3239 lock_buffer(bh);
3240 if (!buffer_uptodate(bh))
3241 return 0;
3242 unlock_buffer(bh);
3243 }
3244 return 1;
3245}
3246EXPORT_SYMBOL(bh_uptodate_or_lock);
3247
3248/**
Randy Dunlapa6b91912008-03-19 17:01:00 -07003249 * bh_submit_read - Submit a locked buffer for reading
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003250 * @bh: struct buffer_head
3251 *
3252 * Returns zero on success and -EIO on error.
3253 */
3254int bh_submit_read(struct buffer_head *bh)
3255{
3256 BUG_ON(!buffer_locked(bh));
3257
3258 if (buffer_uptodate(bh)) {
3259 unlock_buffer(bh);
3260 return 0;
3261 }
3262
3263 get_bh(bh);
3264 bh->b_end_io = end_buffer_read_sync;
3265 submit_bh(READ, bh);
3266 wait_on_buffer(bh);
3267 if (buffer_uptodate(bh))
3268 return 0;
3269 return -EIO;
3270}
3271EXPORT_SYMBOL(bh_submit_read);
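/*
 * Illustrative sketch, added for exposition: the intended pairing of
 * bh_uptodate_or_lock() and bh_submit_read() for read-if-needed.
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = bh_submit_read(bh);
 *		if (err)
 *			return err;
 *	}
 */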
3272
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273void __init buffer_init(void)
3274{
3275 int nrpages;
3276
Christoph Lameterb98938c2008-02-04 22:28:36 -08003277 bh_cachep = kmem_cache_create("buffer_head",
3278 sizeof(struct buffer_head), 0,
3279 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3280 SLAB_MEM_SPREAD),
Richard Kennedy019b4d12010-03-10 15:20:33 -08003281 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282
3283 /*
3284 * Limit the bh occupancy to 10% of ZONE_NORMAL
3285 */
3286 nrpages = (nr_free_buffer_pages() * 10) / 100;
3287 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3288 hotcpu_notifier(buffer_cpu_notify, 0);
3289}