/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/cleancache.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

static int sleep_on_buffer(void *word)
{
        io_schedule();
        return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
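
/*
 * A minimal usage sketch (illustrative only): callers that need the buffer's
 * state to stay stable take the buffer lock rather than merely waiting,
 * since __wait_on_buffer() alone does not prevent the buffer from being
 * locked again immediately afterwards:
 *
 *      lock_buffer(bh);                // sleeps in __lock_buffer() if contended
 *      if (!buffer_uptodate(bh)) {
 *              // ... read or recompute the block's contents ...
 *      }
 *      unlock_buffer(bh);              // wakes waiters in __wait_on_buffer()
 */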

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
        if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
                return 0;
        return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens, due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                        bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty
   buffers we could generate corruption also on the next media inserted.
   Thus a parameter is necessary to handle this case in the safest way
   possible (trying not to also corrupt the newly inserted disk with data
   belonging to the old, now corrupted, disk). Also for the ramdisk the
   natural thing to do in order to release the ramdisk memory is to
   destroy dirty buffers.

   These are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        lru_add_drain_all();    /* make sure all lru add caches are flushed */
        invalidate_mapping_pages(mapping, 0, -1);
        /* 99% of the time, we don't need to flush the cleancache on the bdev.
         * But, for the strange corners, lets be cautious
         */
        cleancache_flush_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone *zone;
        int nid;

        wakeup_flusher_threads(1024);
        yield();

        for_each_online_node(nid) {
                (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                                gfp_zone(GFP_NOFS), NULL,
                                                &zone);
                if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                                GFP_NOFS, NULL);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (!quiet_error(bh))
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O reads any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
                                          bh_end_io_t *handler)
{
        bh->b_end_io = handler;
        set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

static void do_thaw_one(struct super_block *sb, void *unused)
{
        char b[BDEVNAME_SIZE];
        while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
                printk(KERN_WARNING "Emergency Thaw on %s\n",
                       bdevname(sb->s_bdev, b));
}

static void do_thaw_all(struct work_struct *work)
{
        iterate_supers(do_thaw_one, NULL);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
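
/*
 * A minimal usage sketch, assuming a simple buffer-backed filesystem: its
 * ->fsync() typically combines sync_mapping_buffers() with writing back the
 * inode itself.  "example_file_fsync" is a hypothetical name, not part of
 * this file:
 *
 *      static int example_file_fsync(struct file *file, int datasync)
 *      {
 *              struct inode *inode = file->f_mapping->host;
 *              int err, ret;
 *
 *              ret = sync_mapping_buffers(inode->i_mapping);
 *              if (!(inode->i_state & I_DIRTY))
 *                      return ret;
 *              err = sync_inode_metadata(inode, 1);
 *              if (ret == 0)
 *                      ret = err;
 *              return ret;
 *      }
 */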

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
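
/*
 * A minimal usage sketch: a filesystem that has just modified a metadata
 * block on the backing blockdev, and wants fsync() on the file to flush it,
 * would typically do the following ("meta_block_nr" is a hypothetical
 * block number):
 *
 *      struct buffer_head *bh = sb_bread(inode->i_sb, meta_block_nr);
 *      if (bh) {
 *              // ... modify the metadata in bh->b_data ...
 *              mark_buffer_dirty_inode(bh, inode);  // queue on the inode's private_list
 *              brelse(bh);
 *      }
 */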

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
{
        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        int newly_dirty;
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
                __set_page_dirty(page, mapping, 1);
        return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;
        struct blk_plug plug;

        INIT_LIST_HEAD(&tmp);
        blk_start_plug(&plug);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * write_dirty_buffer() actually writes the
                                 * current contents - it is a noop if I/O is
                                 * still in flight on potentially older
                                 * contents.
                                 */
                                write_dirty_buffer(bh, WRITE_SYNC);

                                /*
                                 * Kick off IO for the previous mapping. Note
                                 * that we will not run the very last mapping,
                                 * wait_on_buffer() will do that for us
                                 * through sync_buffer().
                                 */
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        spin_unlock(lock);
        blk_finish_plug(&plug);
        spin_lock(lock);

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                init_buffer(bh, NULL, NULL);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;

        page = find_or_create_page(inode->i_mapping, index,
                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
                return NULL;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        init_page_buffers(page, bdev, block, size);
                        return page;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        init_page_buffers(page, bdev, block, size);
        spin_unlock(&inode->i_mapping->private_lock);
        return page;

failed:
        BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
        struct page *page;
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index.  (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
                        __func__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }
        block = index << sizebits;
        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
                return 0;
        unlock_page(page);
        page_cache_release(page);
        return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "logical block size: %d\n",
                                        bdev_logical_block_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head * bh;
                int ret;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                ret = grow_buffers(bdev, block, size);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh)) {
                struct page *page = bh->b_page;
                if (!TestSetPageDirty(page)) {
                        struct address_space *mapping = page_mapping(page);
                        if (mapping)
                                __set_page_dirty(page, mapping, 0);
                }
        }
}
EXPORT_SYMBOL(mark_buffer_dirty);
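
/*
 * A minimal usage sketch: the common pattern is to modify a buffer's data
 * and only then mark it dirty, so writeback never sees a clean bit on
 * changed contents ("block_nr" is a hypothetical block number):
 *
 *      struct buffer_head *bh = sb_bread(sb, block_nr);
 *      if (bh) {
 *              memset(bh->b_data, 0, bh->b_size);      // modify the block
 *              mark_buffer_dirty(bh);                  // schedule it for writeback
 *              brelse(bh);
 *      }
 */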

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;

                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     8

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = NULL;

        check_irqs_on();
        bh_lru_lock();
        if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;

                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
                        struct buffer_head *bh2 =
                                __this_cpu_read(bh_lrus.bhs[in]);

                        if (bh2 == bh) {
                                __brelse(bh2);
                        } else {
                                if (out >= BH_LRU_SIZE) {
                                        BUG_ON(evictee != NULL);
                                        evictee = bh2;
                                } else {
                                        bhs[out++] = bh2;
                                }
                        }
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
                memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
        }
        bh_lru_unlock();

        if (evictee)
                __brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        __this_cpu_write(bh_lrus.bhs[i],
                                                __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
                                __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        }
        if (bh)
                touch_buffer(bh);
        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size);
        return bh;
}
EXPORT_SYMBOL(__getblk);
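
/*
 * A minimal usage sketch: __getblk() is normally reached through the
 * sb_getblk() wrapper when a filesystem is about to overwrite a block and
 * therefore does not care about its current contents ("new_block_nr" is a
 * hypothetical block number):
 *
 *      struct buffer_head *bh = sb_getblk(sb, new_block_nr);
 *      if (bh) {
 *              lock_buffer(bh);
 *              memset(bh->b_data, 0, bh->b_size);
 *              set_buffer_uptodate(bh);
 *              unlock_buffer(bh);
 *              mark_buffer_dirty(bh);
 *              brelse(bh);
 *      }
 */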

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
                ll_rw_block(READA, 1, &bh);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread);
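
/*
 * A minimal usage sketch: reads usually go through the sb_bread() wrapper
 * around __bread(); the returned buffer is uptodate (or NULL on I/O error)
 * and must be released with brelse() when the caller is done ("block_nr"
 * is a hypothetical block number):
 *
 *      struct buffer_head *bh = sb_bread(sb, block_nr);
 *      if (!bh)
 *              return -EIO;
 *      // ... use bh->b_data, bh->b_size ...
 *      brelse(bh);
 */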

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
        struct bh_lru *b = &get_cpu_var(bh_lrus);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
        put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
        on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
{
        bh->b_page = page;
        BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
                 */
                bh->b_data = (char *)(0 + offset);
        else
                bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
        unlock_buffer(bh);
}

1472/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 * block_invalidatepage - invalidate part or all of a buffer-backed page
1474 *
1475 * @page: the page which is affected
1476 * @offset: the index of the truncation point
1477 *
1478 * block_invalidatepage() is called when all or part of the page has become
1479 * invalidated by a truncate operation.
1480 *
1481 * block_invalidatepage() does not have to release all buffers, but it must
1482 * ensure that no dirty buffer is left outside @offset and that no I/O
1483 * is underway against any of the blocks which are outside the truncation
1484 * point, because the caller is about to free (and possibly reuse) those
1485 * blocks on-disk.
1486 */
NeilBrown2ff28e22006-03-26 01:37:18 -08001487void block_invalidatepage(struct page *page, unsigned long offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488{
1489 struct buffer_head *head, *bh, *next;
1490 unsigned int curr_off = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491
1492 BUG_ON(!PageLocked(page));
1493 if (!page_has_buffers(page))
1494 goto out;
1495
1496 head = page_buffers(page);
1497 bh = head;
1498 do {
1499 unsigned int next_off = curr_off + bh->b_size;
1500 next = bh->b_this_page;
1501
1502 /*
1503 * is this block fully invalidated?
1504 */
1505 if (offset <= curr_off)
1506 discard_buffer(bh);
1507 curr_off = next_off;
1508 bh = next;
1509 } while (bh != head);
1510
1511 /*
1512 * We release buffers only if the entire page is being invalidated.
1513 * The get_block cached value has been unconditionally invalidated,
1514 * so real IO is not possible anymore.
1515 */
1516 if (offset == 0)
NeilBrown2ff28e22006-03-26 01:37:18 -08001517 try_to_release_page(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518out:
NeilBrown2ff28e22006-03-26 01:37:18 -08001519 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520}
1521EXPORT_SYMBOL(block_invalidatepage);
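/*
 * Example (sketch only): for most buffer-backed filesystems the
 * ->invalidatepage address_space operation is a thin wrapper around
 * block_invalidatepage(), along the lines of the hypothetical wiring below.
 */
static void examplefs_invalidatepage(struct page *page, unsigned long offset)
{
        block_invalidatepage(page, offset);
}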
1522
1523/*
1524 * We attach and possibly dirty the buffers atomically wrt
1525 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1526 * is already excluded via the page lock.
1527 */
1528void create_empty_buffers(struct page *page,
1529 unsigned long blocksize, unsigned long b_state)
1530{
1531 struct buffer_head *bh, *head, *tail;
1532
1533 head = alloc_page_buffers(page, blocksize, 1);
1534 bh = head;
1535 do {
1536 bh->b_state |= b_state;
1537 tail = bh;
1538 bh = bh->b_this_page;
1539 } while (bh);
1540 tail->b_this_page = head;
1541
1542 spin_lock(&page->mapping->private_lock);
1543 if (PageUptodate(page) || PageDirty(page)) {
1544 bh = head;
1545 do {
1546 if (PageDirty(page))
1547 set_buffer_dirty(bh);
1548 if (PageUptodate(page))
1549 set_buffer_uptodate(bh);
1550 bh = bh->b_this_page;
1551 } while (bh != head);
1552 }
1553 attach_page_buffers(page, head);
1554 spin_unlock(&page->mapping->private_lock);
1555}
1556EXPORT_SYMBOL(create_empty_buffers);
1557
1558/*
1559 * We are taking a block for data and we don't want any output from any
1560 * buffer-cache aliases from the moment this function returns until
1561 * something explicitly marks the buffer dirty (hopefully that will
1562 * not happen until we free that block ;-)
1563 * We don't even need to mark it not-uptodate - nobody can expect
1564 * anything from a newly allocated buffer anyway. We used to use
1565 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1566 * don't want to mark the alias unmapped, for example - it would confuse
1567 * anyone who might pick it with bread() afterwards...
1568 *
1569 * Also.. Note that bforget() doesn't lock the buffer. So there can
1570 * be writeout I/O going on against recently-freed buffers. We don't
1571 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1572 * only if we really need to. That happens here.
1573 */
1574void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1575{
1576 struct buffer_head *old_bh;
1577
1578 might_sleep();
1579
Coywolf Qi Hunt385fd4c2005-11-07 00:59:39 -08001580 old_bh = __find_get_block_slow(bdev, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 if (old_bh) {
1582 clear_buffer_dirty(old_bh);
1583 wait_on_buffer(old_bh);
1584 clear_buffer_req(old_bh);
1585 __brelse(old_bh);
1586 }
1587}
1588EXPORT_SYMBOL(unmap_underlying_metadata);
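/*
 * Example (sketch only): the usual caller is a filesystem get_block()
 * callback that has just allocated a new on-disk block and wants to kill any
 * stale alias of it in the blockdev's cache.  examplefs_alloc_block() is
 * hypothetical and the lookup path for already-mapped blocks is omitted.
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
                               struct buffer_head *bh_result, int create)
{
        sector_t phys;

        if (!create)
                return 0;       /* lookup of already-mapped blocks omitted */
        phys = examplefs_alloc_block(inode, iblock);    /* hypothetical allocator */
        if (!phys)
                return -ENOSPC;
        map_bh(bh_result, inode->i_sb, phys);
        set_buffer_new(bh_result);
        /* drop any stale buffer_head for the previous user of this block */
        unmap_underlying_metadata(bh_result->b_bdev, bh_result->b_blocknr);
        return 0;
}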
1589
1590/*
1591 * NOTE! All mapped/uptodate combinations are valid:
1592 *
1593 *   Mapped    Uptodate    Meaning
1594 *
1595 *   No        No          "unknown" - must do get_block()
1596 *   No        Yes         "hole" - zero-filled
1597 *   Yes       No          "allocated" - allocated on disk, not read in
1598 *   Yes       Yes         "valid" - allocated and up-to-date in memory.
1599 *
1600 * "Dirty" is valid only with the last case (mapped+uptodate).
1601 */
1602
1603/*
1604 * While block_write_full_page is writing back the dirty buffers under
1605 * the page lock, whoever dirtied the buffers may decide to clean them
1606 * again at any time. We handle that by only looking at the buffer
1607 * state inside lock_buffer().
1608 *
1609 * If block_write_full_page() is called for regular writeback
1610 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1611 * locked buffer. This can only happen if someone has written the buffer
1612 * directly, with submit_bh(). At the address_space level PageWriteback
1613 * prevents this contention from occurring.
Theodore Ts'o6e34eed2009-04-07 18:12:43 -04001614 *
1615 * If block_write_full_page() is called with wbc->sync_mode ==
Jens Axboe721a9602011-03-09 11:56:30 +01001616 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1617 * causes the writes to be flagged as synchronous writes.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 */
1619static int __block_write_full_page(struct inode *inode, struct page *page,
Chris Mason35c80d52009-04-15 13:22:38 -04001620 get_block_t *get_block, struct writeback_control *wbc,
1621 bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622{
1623 int err;
1624 sector_t block;
1625 sector_t last_block;
Andrew Mortonf0fbd5f2005-05-05 16:15:48 -07001626 struct buffer_head *bh, *head;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001627 const unsigned blocksize = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 int nr_underway = 0;
Theodore Ts'o6e34eed2009-04-07 18:12:43 -04001629 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
Jens Axboe721a9602011-03-09 11:56:30 +01001630 WRITE_SYNC : WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631
1632 BUG_ON(!PageLocked(page));
1633
1634 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1635
1636 if (!page_has_buffers(page)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001637 create_empty_buffers(page, blocksize,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 (1 << BH_Dirty)|(1 << BH_Uptodate));
1639 }
1640
1641 /*
1642 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1643 * here, and the (potentially unmapped) buffers may become dirty at
1644 * any time. If a buffer becomes dirty here after we've inspected it
1645 * then we just miss that fact, and the page stays dirty.
1646 *
1647 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1648 * handle that here by just cleaning them.
1649 */
1650
Andrew Morton54b21a72006-01-08 01:03:05 -08001651 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 head = page_buffers(page);
1653 bh = head;
1654
1655 /*
1656 * Get all the dirty buffers mapped to disk addresses and
1657 * handle any aliases from the underlying blockdev's mapping.
1658 */
1659 do {
1660 if (block > last_block) {
1661 /*
1662 * mapped buffers outside i_size will occur, because
1663 * this page can be outside i_size when there is a
1664 * truncate in progress.
1665 */
1666 /*
1667 * The buffer was zeroed by block_write_full_page()
1668 */
1669 clear_buffer_dirty(bh);
1670 set_buffer_uptodate(bh);
Alex Tomas29a814d2008-07-11 19:27:31 -04001671 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1672 buffer_dirty(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001673 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 err = get_block(inode, block, bh, 1);
1675 if (err)
1676 goto recover;
Alex Tomas29a814d2008-07-11 19:27:31 -04001677 clear_buffer_delay(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 if (buffer_new(bh)) {
1679 /* blockdev mappings never come here */
1680 clear_buffer_new(bh);
1681 unmap_underlying_metadata(bh->b_bdev,
1682 bh->b_blocknr);
1683 }
1684 }
1685 bh = bh->b_this_page;
1686 block++;
1687 } while (bh != head);
1688
1689 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 if (!buffer_mapped(bh))
1691 continue;
1692 /*
1693 * If it's a fully non-blocking write attempt and we cannot
1694 * lock the buffer then redirty the page. Note that this can
Jens Axboe5b0830c2009-09-23 19:37:09 +02001695 * potentially cause a busy-wait loop from writeback threads
1696 * and kswapd activity, but those code paths have their own
1697 * higher-level throttling.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 */
Wu Fengguang1b430be2010-10-26 14:21:26 -07001699 if (wbc->sync_mode != WB_SYNC_NONE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02001701 } else if (!trylock_buffer(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 redirty_page_for_writepage(wbc, page);
1703 continue;
1704 }
1705 if (test_clear_buffer_dirty(bh)) {
Chris Mason35c80d52009-04-15 13:22:38 -04001706 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 } else {
1708 unlock_buffer(bh);
1709 }
1710 } while ((bh = bh->b_this_page) != head);
1711
1712 /*
1713 * The page and its buffers are protected by PageWriteback(), so we can
1714 * drop the bh refcounts early.
1715 */
1716 BUG_ON(PageWriteback(page));
1717 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 do {
1720 struct buffer_head *next = bh->b_this_page;
1721 if (buffer_async_write(bh)) {
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001722 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 nr_underway++;
1724 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 bh = next;
1726 } while (bh != head);
Andrew Morton05937ba2005-05-05 16:15:47 -07001727 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728
1729 err = 0;
1730done:
1731 if (nr_underway == 0) {
1732 /*
1733 * The page was marked dirty, but the buffers were
1734 * clean. Someone wrote them back by hand with
1735 * ll_rw_block/submit_bh. A rare case.
1736 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 end_page_writeback(page);
Nick Piggin3d67f2d2007-05-06 14:49:05 -07001738
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 /*
1740 * The page and buffer_heads can be released at any time from
1741 * here on.
1742 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 }
1744 return err;
1745
1746recover:
1747 /*
1748 * ENOSPC, or some other error. We may already have added some
1749 * blocks to the file, so we need to write these out to avoid
1750 * exposing stale data.
1751 * The page is currently locked and not marked for writeback
1752 */
1753 bh = head;
1754 /* Recovery: lock and submit the mapped buffers */
1755 do {
Alex Tomas29a814d2008-07-11 19:27:31 -04001756 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1757 !buffer_delay(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 lock_buffer(bh);
Chris Mason35c80d52009-04-15 13:22:38 -04001759 mark_buffer_async_write_endio(bh, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 } else {
1761 /*
1762 * The buffer may have been set dirty during
1763 * attachment to a dirty page.
1764 */
1765 clear_buffer_dirty(bh);
1766 }
1767 } while ((bh = bh->b_this_page) != head);
1768 SetPageError(page);
1769 BUG_ON(PageWriteback(page));
Andrew Morton7e4c3692007-05-08 00:23:27 -07001770 mapping_set_error(page->mapping, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 do {
1773 struct buffer_head *next = bh->b_this_page;
1774 if (buffer_async_write(bh)) {
1775 clear_buffer_dirty(bh);
Theodore Ts'oa64c8612009-03-27 22:14:10 -04001776 submit_bh(write_op, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 nr_underway++;
1778 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 bh = next;
1780 } while (bh != head);
Nick Pigginffda9d32007-02-20 13:57:54 -08001781 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 goto done;
1783}
1784
Nick Pigginafddba42007-10-16 01:25:01 -07001785/*
1786 * If a page has any new buffers, zero them out here, and mark them uptodate
1787 * and dirty so they'll be written out (in order to prevent uninitialised
1788 * block data from leaking). And clear the new bit.
1789 */
1790void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1791{
1792 unsigned int block_start, block_end;
1793 struct buffer_head *head, *bh;
1794
1795 BUG_ON(!PageLocked(page));
1796 if (!page_has_buffers(page))
1797 return;
1798
1799 bh = head = page_buffers(page);
1800 block_start = 0;
1801 do {
1802 block_end = block_start + bh->b_size;
1803
1804 if (buffer_new(bh)) {
1805 if (block_end > from && block_start < to) {
1806 if (!PageUptodate(page)) {
1807 unsigned start, size;
1808
1809 start = max(from, block_start);
1810 size = min(to, block_end) - start;
1811
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001812 zero_user(page, start, size);
Nick Pigginafddba42007-10-16 01:25:01 -07001813 set_buffer_uptodate(bh);
1814 }
1815
1816 clear_buffer_new(bh);
1817 mark_buffer_dirty(bh);
1818 }
1819 }
1820
1821 block_start = block_end;
1822 bh = bh->b_this_page;
1823 } while (bh != head);
1824}
1825EXPORT_SYMBOL(page_zero_new_buffers);
1826
Christoph Hellwigebdec242010-10-06 10:47:23 +02001827int __block_write_begin(struct page *page, loff_t pos, unsigned len,
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001828 get_block_t *get_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829{
Christoph Hellwigebdec242010-10-06 10:47:23 +02001830 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1831 unsigned to = from + len;
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001832 struct inode *inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 unsigned block_start, block_end;
1834 sector_t block;
1835 int err = 0;
1836 unsigned blocksize, bbits;
1837 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1838
1839 BUG_ON(!PageLocked(page));
1840 BUG_ON(from > PAGE_CACHE_SIZE);
1841 BUG_ON(to > PAGE_CACHE_SIZE);
1842 BUG_ON(from > to);
1843
1844 blocksize = 1 << inode->i_blkbits;
1845 if (!page_has_buffers(page))
1846 create_empty_buffers(page, blocksize, 0);
1847 head = page_buffers(page);
1848
1849 bbits = inode->i_blkbits;
1850 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1851
1852 for(bh = head, block_start = 0; bh != head || !block_start;
1853 block++, block_start=block_end, bh = bh->b_this_page) {
1854 block_end = block_start + blocksize;
1855 if (block_end <= from || block_start >= to) {
1856 if (PageUptodate(page)) {
1857 if (!buffer_uptodate(bh))
1858 set_buffer_uptodate(bh);
1859 }
1860 continue;
1861 }
1862 if (buffer_new(bh))
1863 clear_buffer_new(bh);
1864 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001865 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 err = get_block(inode, block, bh, 1);
1867 if (err)
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001868 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 if (buffer_new(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 unmap_underlying_metadata(bh->b_bdev,
1871 bh->b_blocknr);
1872 if (PageUptodate(page)) {
Nick Piggin637aff42007-10-16 01:25:00 -07001873 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 set_buffer_uptodate(bh);
Nick Piggin637aff42007-10-16 01:25:00 -07001875 mark_buffer_dirty(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 continue;
1877 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001878 if (block_end > to || block_start < from)
1879 zero_user_segments(page,
1880 to, block_end,
1881 block_start, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 continue;
1883 }
1884 }
1885 if (PageUptodate(page)) {
1886 if (!buffer_uptodate(bh))
1887 set_buffer_uptodate(bh);
1888 continue;
1889 }
1890 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
David Chinner33a266d2007-02-12 00:51:41 -08001891 !buffer_unwritten(bh) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 (block_start < from || block_end > to)) {
1893 ll_rw_block(READ, 1, &bh);
1894 *wait_bh++=bh;
1895 }
1896 }
1897 /*
1898 * If we issued read requests - let them complete.
1899 */
1900 while(wait_bh > wait) {
1901 wait_on_buffer(*--wait_bh);
1902 if (!buffer_uptodate(*wait_bh))
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001903 err = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 }
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001905 if (unlikely(err)) {
Nick Pigginafddba42007-10-16 01:25:01 -07001906 page_zero_new_buffers(page, from, to);
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001907 ClearPageUptodate(page);
1908 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 return err;
1910}
Christoph Hellwigebdec242010-10-06 10:47:23 +02001911EXPORT_SYMBOL(__block_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
1913static int __block_commit_write(struct inode *inode, struct page *page,
1914 unsigned from, unsigned to)
1915{
1916 unsigned block_start, block_end;
1917 int partial = 0;
1918 unsigned blocksize;
1919 struct buffer_head *bh, *head;
1920
1921 blocksize = 1 << inode->i_blkbits;
1922
1923 for(bh = head = page_buffers(page), block_start = 0;
1924 bh != head || !block_start;
1925 block_start=block_end, bh = bh->b_this_page) {
1926 block_end = block_start + blocksize;
1927 if (block_end <= from || block_start >= to) {
1928 if (!buffer_uptodate(bh))
1929 partial = 1;
1930 } else {
1931 set_buffer_uptodate(bh);
1932 mark_buffer_dirty(bh);
1933 }
Nick Pigginafddba42007-10-16 01:25:01 -07001934 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 }
1936
1937 /*
1938 * If this is a partial write which happened to make all buffers
1939 * uptodate then we can optimize away a bogus readpage() for
1940 * the next read(). Here we 'discover' whether the page went
1941 * uptodate as a result of this (potentially partial) write.
1942 */
1943 if (!partial)
1944 SetPageUptodate(page);
1945 return 0;
1946}
1947
1948/*
Christoph Hellwig155130a2010-06-04 11:29:58 +02001949 * block_write_begin takes care of the basic task of block allocation and
1950 * bringing partial write blocks uptodate first.
1951 *
npiggin@suse.de7bb46a62010-05-27 01:05:33 +10001952 * The filesystem needs to handle block truncation upon failure.
Nick Pigginafddba42007-10-16 01:25:01 -07001953 */
Christoph Hellwig155130a2010-06-04 11:29:58 +02001954int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1955 unsigned flags, struct page **pagep, get_block_t *get_block)
Nick Pigginafddba42007-10-16 01:25:01 -07001956{
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001957 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
Nick Pigginafddba42007-10-16 01:25:01 -07001958 struct page *page;
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001959 int status;
Nick Pigginafddba42007-10-16 01:25:01 -07001960
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001961 page = grab_cache_page_write_begin(mapping, index, flags);
1962 if (!page)
1963 return -ENOMEM;
Nick Pigginafddba42007-10-16 01:25:01 -07001964
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001965 status = __block_write_begin(page, pos, len, get_block);
Nick Pigginafddba42007-10-16 01:25:01 -07001966 if (unlikely(status)) {
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001967 unlock_page(page);
1968 page_cache_release(page);
1969 page = NULL;
Nick Pigginafddba42007-10-16 01:25:01 -07001970 }
1971
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001972 *pagep = page;
Nick Pigginafddba42007-10-16 01:25:01 -07001973 return status;
1974}
1975EXPORT_SYMBOL(block_write_begin);
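/*
 * Example (sketch only): a simple filesystem's ->write_begin usually just
 * forwards here with its own get_block callback (the hypothetical
 * examplefs_get_block sketched earlier).  A real implementation would also
 * truncate any blocks instantiated beyond i_size when this fails, as the
 * comment above requires.
 */
static int examplefs_write_begin(struct file *file, struct address_space *mapping,
                                 loff_t pos, unsigned len, unsigned flags,
                                 struct page **pagep, void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 examplefs_get_block);
}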
1976
1977int block_write_end(struct file *file, struct address_space *mapping,
1978 loff_t pos, unsigned len, unsigned copied,
1979 struct page *page, void *fsdata)
1980{
1981 struct inode *inode = mapping->host;
1982 unsigned start;
1983
1984 start = pos & (PAGE_CACHE_SIZE - 1);
1985
1986 if (unlikely(copied < len)) {
1987 /*
1988 * The buffers that were written will now be uptodate, so we
1989 * don't have to worry about a readpage reading them and
1990 * overwriting a partial write. However if we have encountered
1991 * a short write and only partially written into a buffer, it
1992 * will not be marked uptodate, so a readpage might come in and
1993 * destroy our partial write.
1994 *
1995 * Do the simplest thing, and just treat any short write to a
1996 * non uptodate page as a zero-length write, and force the
1997 * caller to redo the whole thing.
1998 */
1999 if (!PageUptodate(page))
2000 copied = 0;
2001
2002 page_zero_new_buffers(page, start+copied, start+len);
2003 }
2004 flush_dcache_page(page);
2005
2006 /* This could be a short (even 0-length) commit */
2007 __block_commit_write(inode, page, start, start+copied);
2008
2009 return copied;
2010}
2011EXPORT_SYMBOL(block_write_end);
2012
2013int generic_write_end(struct file *file, struct address_space *mapping,
2014 loff_t pos, unsigned len, unsigned copied,
2015 struct page *page, void *fsdata)
2016{
2017 struct inode *inode = mapping->host;
Jan Karac7d206b2008-07-11 19:27:31 -04002018 int i_size_changed = 0;
Nick Pigginafddba42007-10-16 01:25:01 -07002019
2020 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2021
2022 /*
2023 * No need to use i_size_read() here, the i_size
2024 * cannot change under us because we hold i_mutex.
2025 *
2026 * But it's important to update i_size while still holding page lock:
2027 * page writeout could otherwise come in and zero beyond i_size.
2028 */
2029 if (pos+copied > inode->i_size) {
2030 i_size_write(inode, pos+copied);
Jan Karac7d206b2008-07-11 19:27:31 -04002031 i_size_changed = 1;
Nick Pigginafddba42007-10-16 01:25:01 -07002032 }
2033
2034 unlock_page(page);
2035 page_cache_release(page);
2036
Jan Karac7d206b2008-07-11 19:27:31 -04002037 /*
2038 * Don't mark the inode dirty under page lock. First, it unnecessarily
2039 * makes the holding time of page lock longer. Second, it forces lock
2040 * ordering of page lock and transaction start for journaling
2041 * filesystems.
2042 */
2043 if (i_size_changed)
2044 mark_inode_dirty(inode);
2045
Nick Pigginafddba42007-10-16 01:25:01 -07002046 return copied;
2047}
2048EXPORT_SYMBOL(generic_write_end);
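/*
 * Example (sketch only): many filesystems point ->write_end straight at
 * generic_write_end() and wire the hooks up in their
 * address_space_operations; the examplefs names are hypothetical.
 */
static const struct address_space_operations examplefs_aops = {
        .write_begin    = examplefs_write_begin,        /* sketched above */
        .write_end      = generic_write_end,
        /* .readpage, .writepage, .bmap etc. omitted here for brevity */
};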
2049
2050/*
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002051 * block_is_partially_uptodate checks whether buffers within a page are
2052 * uptodate or not.
2053 *
2054 * Returns true if all buffers which correspond to a file portion
2055 * we want to read are uptodate.
2056 */
2057int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2058 unsigned long from)
2059{
2060 struct inode *inode = page->mapping->host;
2061 unsigned block_start, block_end, blocksize;
2062 unsigned to;
2063 struct buffer_head *bh, *head;
2064 int ret = 1;
2065
2066 if (!page_has_buffers(page))
2067 return 0;
2068
2069 blocksize = 1 << inode->i_blkbits;
2070 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2071 to = from + to;
2072 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2073 return 0;
2074
2075 head = page_buffers(page);
2076 bh = head;
2077 block_start = 0;
2078 do {
2079 block_end = block_start + blocksize;
2080 if (block_end > from && block_start < to) {
2081 if (!buffer_uptodate(bh)) {
2082 ret = 0;
2083 break;
2084 }
2085 if (block_end >= to)
2086 break;
2087 }
2088 block_start = block_end;
2089 bh = bh->b_this_page;
2090 } while (bh != head);
2091
2092 return ret;
2093}
2094EXPORT_SYMBOL(block_is_partially_uptodate);
2095
2096/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 * Generic "read page" function for block devices that have the normal
2098 * get_block functionality. This is most of the block device filesystems.
2099 * Reads the page asynchronously --- the unlock_buffer() and
2100 * set/clear_buffer_uptodate() functions propagate buffer state into the
2101 * page struct once IO has completed.
2102 */
2103int block_read_full_page(struct page *page, get_block_t *get_block)
2104{
2105 struct inode *inode = page->mapping->host;
2106 sector_t iblock, lblock;
2107 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2108 unsigned int blocksize;
2109 int nr, i;
2110 int fully_mapped = 1;
2111
Matt Mackallcd7619d2005-05-01 08:59:01 -07002112 BUG_ON(!PageLocked(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 blocksize = 1 << inode->i_blkbits;
2114 if (!page_has_buffers(page))
2115 create_empty_buffers(page, blocksize, 0);
2116 head = page_buffers(page);
2117
2118 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2119 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2120 bh = head;
2121 nr = 0;
2122 i = 0;
2123
2124 do {
2125 if (buffer_uptodate(bh))
2126 continue;
2127
2128 if (!buffer_mapped(bh)) {
Andrew Mortonc64610b2005-05-16 21:53:49 -07002129 int err = 0;
2130
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 fully_mapped = 0;
2132 if (iblock < lblock) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002133 WARN_ON(bh->b_size != blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002134 err = get_block(inode, iblock, bh, 0);
2135 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 SetPageError(page);
2137 }
2138 if (!buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002139 zero_user(page, i * blocksize, blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002140 if (!err)
2141 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 continue;
2143 }
2144 /*
2145 * get_block() might have updated the buffer
2146 * synchronously
2147 */
2148 if (buffer_uptodate(bh))
2149 continue;
2150 }
2151 arr[nr++] = bh;
2152 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2153
2154 if (fully_mapped)
2155 SetPageMappedToDisk(page);
2156
2157 if (!nr) {
2158 /*
2159 * All buffers are uptodate - we can set the page uptodate
2160 * as well. But not if get_block() returned an error.
2161 */
2162 if (!PageError(page))
2163 SetPageUptodate(page);
2164 unlock_page(page);
2165 return 0;
2166 }
2167
2168 /* Stage two: lock the buffers */
2169 for (i = 0; i < nr; i++) {
2170 bh = arr[i];
2171 lock_buffer(bh);
2172 mark_buffer_async_read(bh);
2173 }
2174
2175 /*
2176 * Stage 3: start the IO. Check for uptodateness
2177 * inside the buffer lock in case another process reading
2178 * the underlying blockdev brought it uptodate (the sct fix).
2179 */
2180 for (i = 0; i < nr; i++) {
2181 bh = arr[i];
2182 if (buffer_uptodate(bh))
2183 end_buffer_async_read(bh, 1);
2184 else
2185 submit_bh(READ, bh);
2186 }
2187 return 0;
2188}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002189EXPORT_SYMBOL(block_read_full_page);
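/*
 * Example (sketch only): the typical consumer is a filesystem ->readpage
 * that simply forwards to block_read_full_page() with its own block-mapping
 * callback (the hypothetical examplefs_get_block again).
 */
static int examplefs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, examplefs_get_block);
}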
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190
2191/* utility function for filesystems that need to do work on expanding
Nick Piggin89e10782007-10-16 01:25:07 -07002192 * truncates. Uses filesystem pagecache writes to allow the filesystem to
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 * deal with the hole.
2194 */
Nick Piggin89e10782007-10-16 01:25:07 -07002195int generic_cont_expand_simple(struct inode *inode, loff_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196{
2197 struct address_space *mapping = inode->i_mapping;
2198 struct page *page;
Nick Piggin89e10782007-10-16 01:25:07 -07002199 void *fsdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 int err;
2201
npiggin@suse.dec08d3b02009-08-21 02:35:06 +10002202 err = inode_newsize_ok(inode, size);
2203 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 goto out;
2205
Nick Piggin89e10782007-10-16 01:25:07 -07002206 err = pagecache_write_begin(NULL, mapping, size, 0,
2207 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2208 &page, &fsdata);
2209 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 goto out;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002211
Nick Piggin89e10782007-10-16 01:25:07 -07002212 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2213 BUG_ON(err > 0);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002214
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215out:
2216 return err;
2217}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002218EXPORT_SYMBOL(generic_cont_expand_simple);
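/*
 * Example (sketch only, modelled on what hole-less filesystems tend to do in
 * ->setattr): grow the file with zero-filled pagecache before the size change
 * is committed.  examplefs_setattr() is hypothetical and the rest of the
 * attribute handling is omitted.
 */
static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        int err;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
                err = generic_cont_expand_simple(inode, attr->ia_size);
                if (err)
                        return err;
        }
        /* remaining attribute handling omitted */
        return 0;
}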
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Adrian Bunkf1e3af72008-04-29 00:59:01 -07002220static int cont_expand_zero(struct file *file, struct address_space *mapping,
2221 loff_t pos, loff_t *bytes)
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002222{
Nick Piggin89e10782007-10-16 01:25:07 -07002223 struct inode *inode = mapping->host;
2224 unsigned blocksize = 1 << inode->i_blkbits;
2225 struct page *page;
2226 void *fsdata;
2227 pgoff_t index, curidx;
2228 loff_t curpos;
2229 unsigned zerofrom, offset, len;
2230 int err = 0;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002231
Nick Piggin89e10782007-10-16 01:25:07 -07002232 index = pos >> PAGE_CACHE_SHIFT;
2233 offset = pos & ~PAGE_CACHE_MASK;
2234
2235 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2236 zerofrom = curpos & ~PAGE_CACHE_MASK;
2237 if (zerofrom & (blocksize-1)) {
2238 *bytes |= (blocksize-1);
2239 (*bytes)++;
2240 }
2241 len = PAGE_CACHE_SIZE - zerofrom;
2242
2243 err = pagecache_write_begin(file, mapping, curpos, len,
2244 AOP_FLAG_UNINTERRUPTIBLE,
2245 &page, &fsdata);
2246 if (err)
2247 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002248 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002249 err = pagecache_write_end(file, mapping, curpos, len, len,
2250 page, fsdata);
2251 if (err < 0)
2252 goto out;
2253 BUG_ON(err != len);
2254 err = 0;
OGAWA Hirofumi061e9742008-04-28 02:16:28 -07002255
2256 balance_dirty_pages_ratelimited(mapping);
Nick Piggin89e10782007-10-16 01:25:07 -07002257 }
2258
2259 /* page covers the boundary, find the boundary offset */
2260 if (index == curidx) {
2261 zerofrom = curpos & ~PAGE_CACHE_MASK;
2262 /* if we will expand the thing last block will be filled */
2263 if (offset <= zerofrom) {
2264 goto out;
2265 }
2266 if (zerofrom & (blocksize-1)) {
2267 *bytes |= (blocksize-1);
2268 (*bytes)++;
2269 }
2270 len = offset - zerofrom;
2271
2272 err = pagecache_write_begin(file, mapping, curpos, len,
2273 AOP_FLAG_UNINTERRUPTIBLE,
2274 &page, &fsdata);
2275 if (err)
2276 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002277 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002278 err = pagecache_write_end(file, mapping, curpos, len, len,
2279 page, fsdata);
2280 if (err < 0)
2281 goto out;
2282 BUG_ON(err != len);
2283 err = 0;
2284 }
2285out:
2286 return err;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002287}
2288
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289/*
2290 * For moronic filesystems that do not allow holes in files.
2291 * We may have to extend the file.
2292 */
Christoph Hellwig282dc172010-06-04 11:29:55 +02002293int cont_write_begin(struct file *file, struct address_space *mapping,
Nick Piggin89e10782007-10-16 01:25:07 -07002294 loff_t pos, unsigned len, unsigned flags,
2295 struct page **pagep, void **fsdata,
2296 get_block_t *get_block, loff_t *bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 unsigned blocksize = 1 << inode->i_blkbits;
Nick Piggin89e10782007-10-16 01:25:07 -07002300 unsigned zerofrom;
2301 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
Nick Piggin89e10782007-10-16 01:25:07 -07002303 err = cont_expand_zero(file, mapping, pos, bytes);
2304 if (err)
Christoph Hellwig155130a2010-06-04 11:29:58 +02002305 return err;
Nick Piggin89e10782007-10-16 01:25:07 -07002306
2307 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2308 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2309 *bytes |= (blocksize-1);
2310 (*bytes)++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 }
2312
Christoph Hellwig155130a2010-06-04 11:29:58 +02002313 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002315EXPORT_SYMBOL(cont_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317int block_commit_write(struct page *page, unsigned from, unsigned to)
2318{
2319 struct inode *inode = page->mapping->host;
2320 __block_commit_write(inode,page,from,to);
2321 return 0;
2322}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002323EXPORT_SYMBOL(block_commit_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324
David Chinner54171692007-07-19 17:39:55 +10002325/*
2326 * block_page_mkwrite() is not allowed to change the file size as it gets
2327 * called from a page fault handler when a page is first dirtied. Hence we must
2328 * be careful to check for EOF conditions here. We set the page up correctly
2329 * for a written page which means we get ENOSPC checking when writing into
2330 * holes and correct delalloc and unwritten extent mapping on filesystems that
2331 * support these features.
2332 *
2333 * We are not allowed to take the i_mutex here so we have to play games to
2334 * protect against truncate races as the page could now be beyond EOF. Because
npiggin@suse.de7bb46a62010-05-27 01:05:33 +10002335 * truncate writes the inode size before removing pages, once we have the
David Chinner54171692007-07-19 17:39:55 +10002336 * page lock we can determine safely if the page is beyond EOF. If it is not
2337 * beyond EOF, then the page is guaranteed safe against truncation until we
2338 * unlock the page.
Jan Karaea13a862011-05-24 00:23:35 +02002339 *
2340 * Direct callers of this function should call vfs_check_frozen() so that page
2341 * faults do not busy-loop until the fs is thawed.
David Chinner54171692007-07-19 17:39:55 +10002342 */
Jan Kara24da4fa2011-05-24 00:23:34 +02002343int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2344 get_block_t get_block)
David Chinner54171692007-07-19 17:39:55 +10002345{
Nick Pigginc2ec1752009-03-31 15:23:21 -07002346 struct page *page = vmf->page;
David Chinner54171692007-07-19 17:39:55 +10002347 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2348 unsigned long end;
2349 loff_t size;
Jan Kara24da4fa2011-05-24 00:23:34 +02002350 int ret;
David Chinner54171692007-07-19 17:39:55 +10002351
2352 lock_page(page);
2353 size = i_size_read(inode);
2354 if ((page->mapping != inode->i_mapping) ||
Nick Piggin18336332007-07-20 00:31:45 -07002355 (page_offset(page) > size)) {
Jan Kara24da4fa2011-05-24 00:23:34 +02002356 /* We overload EFAULT to mean page got truncated */
2357 ret = -EFAULT;
2358 goto out_unlock;
David Chinner54171692007-07-19 17:39:55 +10002359 }
2360
2361 /* page is wholly or partially inside EOF */
2362 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2363 end = size & ~PAGE_CACHE_MASK;
2364 else
2365 end = PAGE_CACHE_SIZE;
2366
Christoph Hellwigebdec242010-10-06 10:47:23 +02002367 ret = __block_write_begin(page, 0, end, get_block);
David Chinner54171692007-07-19 17:39:55 +10002368 if (!ret)
2369 ret = block_commit_write(page, 0, end);
2370
Jan Kara24da4fa2011-05-24 00:23:34 +02002371 if (unlikely(ret < 0))
2372 goto out_unlock;
Jan Karaea13a862011-05-24 00:23:35 +02002373 /*
2374 * Freezing in progress? We check after the page is marked dirty and
2375 * with page lock held so if the test here fails, we are sure freezing
2376 * code will wait during syncing until the page fault is done - at that
2377 * point page will be dirty and unlocked so freezing code will write it
2378 * and writeprotect it again.
2379 */
2380 set_page_dirty(page);
2381 if (inode->i_sb->s_frozen != SB_UNFROZEN) {
2382 ret = -EAGAIN;
2383 goto out_unlock;
2384 }
Jan Kara24da4fa2011-05-24 00:23:34 +02002385 return 0;
2386out_unlock:
2387 unlock_page(page);
David Chinner54171692007-07-19 17:39:55 +10002388 return ret;
2389}
Jan Kara24da4fa2011-05-24 00:23:34 +02002390EXPORT_SYMBOL(__block_page_mkwrite);
2391
2392int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2393 get_block_t get_block)
2394{
Jan Karaea13a862011-05-24 00:23:35 +02002395 int ret;
2396 struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
Jan Kara24da4fa2011-05-24 00:23:34 +02002397
Jan Karaea13a862011-05-24 00:23:35 +02002398 /*
2399 * This check is racy but catches the common case. The check in
2400 * __block_page_mkwrite() is reliable.
2401 */
2402 vfs_check_frozen(sb, SB_FREEZE_WRITE);
2403 ret = __block_page_mkwrite(vma, vmf, get_block);
Jan Kara24da4fa2011-05-24 00:23:34 +02002404 return block_page_mkwrite_return(ret);
2405}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002406EXPORT_SYMBOL(block_page_mkwrite);
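/*
 * Example (sketch only): filesystems expose this through the
 * vm_operations_struct installed by their ->mmap; one plausible wiring,
 * again using the hypothetical examplefs_get_block.
 */
static int examplefs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return block_page_mkwrite(vma, vmf, examplefs_get_block);
}

static const struct vm_operations_struct examplefs_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = examplefs_page_mkwrite,
};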
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
2408/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002409 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 * immediately, while under the page lock. So it needs a special end_io
2411 * handler which does not touch the bh after unlocking it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 */
2413static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2414{
Dmitry Monakhov68671f32007-10-16 01:24:47 -07002415 __end_buffer_read_notouch(bh, uptodate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416}
2417
2418/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002419 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2420 * the page (converting it to circular linked list and taking care of page
2421 * dirty races).
2422 */
2423static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2424{
2425 struct buffer_head *bh;
2426
2427 BUG_ON(!PageLocked(page));
2428
2429 spin_lock(&page->mapping->private_lock);
2430 bh = head;
2431 do {
2432 if (PageDirty(page))
2433 set_buffer_dirty(bh);
2434 if (!bh->b_this_page)
2435 bh->b_this_page = head;
2436 bh = bh->b_this_page;
2437 } while (bh != head);
2438 attach_page_buffers(page, head);
2439 spin_unlock(&page->mapping->private_lock);
2440}
2441
2442/*
Christoph Hellwigea0f04e2010-06-04 11:29:54 +02002443 * On entry, the page is fully not uptodate.
2444 * On exit the page is fully uptodate in the areas outside (from,to)
npiggin@suse.de7bb46a62010-05-27 01:05:33 +10002445 * The filesystem needs to handle block truncation upon failure.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 */
Christoph Hellwigea0f04e2010-06-04 11:29:54 +02002447int nobh_write_begin(struct address_space *mapping,
Nick Piggin03158cd2007-10-16 01:25:25 -07002448 loff_t pos, unsigned len, unsigned flags,
2449 struct page **pagep, void **fsdata,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 get_block_t *get_block)
2451{
Nick Piggin03158cd2007-10-16 01:25:25 -07002452 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 const unsigned blkbits = inode->i_blkbits;
2454 const unsigned blocksize = 1 << blkbits;
Nick Piggina4b06722007-10-16 01:24:48 -07002455 struct buffer_head *head, *bh;
Nick Piggin03158cd2007-10-16 01:25:25 -07002456 struct page *page;
2457 pgoff_t index;
2458 unsigned from, to;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 unsigned block_in_page;
Nick Piggina4b06722007-10-16 01:24:48 -07002460 unsigned block_start, block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 sector_t block_in_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 int nr_reads = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 int ret = 0;
2464 int is_mapped_to_disk = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465
Nick Piggin03158cd2007-10-16 01:25:25 -07002466 index = pos >> PAGE_CACHE_SHIFT;
2467 from = pos & (PAGE_CACHE_SIZE - 1);
2468 to = from + len;
2469
Nick Piggin54566b22009-01-04 12:00:53 -08002470 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Piggin03158cd2007-10-16 01:25:25 -07002471 if (!page)
2472 return -ENOMEM;
2473 *pagep = page;
2474 *fsdata = NULL;
2475
2476 if (page_has_buffers(page)) {
Namhyung Kim309f77a2010-10-25 15:01:12 +09002477 ret = __block_write_begin(page, pos, len, get_block);
2478 if (unlikely(ret))
2479 goto out_release;
2480 return ret;
Nick Piggin03158cd2007-10-16 01:25:25 -07002481 }
Nick Piggina4b06722007-10-16 01:24:48 -07002482
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 if (PageMappedToDisk(page))
2484 return 0;
2485
Nick Piggina4b06722007-10-16 01:24:48 -07002486 /*
2487 * Allocate buffers so that we can keep track of state, and potentially
2488 * attach them to the page if an error occurs. In the common case of
2489 * no error, they will just be freed again without ever being attached
2490 * to the page (which is all OK, because we're under the page lock).
2491 *
2492 * Be careful: the buffer linked list is a NULL terminated one, rather
2493 * than the circular one we're used to.
2494 */
2495 head = alloc_page_buffers(page, blocksize, 0);
Nick Piggin03158cd2007-10-16 01:25:25 -07002496 if (!head) {
2497 ret = -ENOMEM;
2498 goto out_release;
2499 }
Nick Piggina4b06722007-10-16 01:24:48 -07002500
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
2503 /*
2504 * We loop across all blocks in the page, whether or not they are
2505 * part of the affected region. This is so we can discover if the
2506 * page is fully mapped-to-disk.
2507 */
Nick Piggina4b06722007-10-16 01:24:48 -07002508 for (block_start = 0, block_in_page = 0, bh = head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 block_start < PAGE_CACHE_SIZE;
Nick Piggina4b06722007-10-16 01:24:48 -07002510 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 int create;
2512
Nick Piggina4b06722007-10-16 01:24:48 -07002513 block_end = block_start + blocksize;
2514 bh->b_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 create = 1;
2516 if (block_start >= to)
2517 create = 0;
2518 ret = get_block(inode, block_in_file + block_in_page,
Nick Piggina4b06722007-10-16 01:24:48 -07002519 bh, create);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 if (ret)
2521 goto failed;
Nick Piggina4b06722007-10-16 01:24:48 -07002522 if (!buffer_mapped(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 is_mapped_to_disk = 0;
Nick Piggina4b06722007-10-16 01:24:48 -07002524 if (buffer_new(bh))
2525 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2526 if (PageUptodate(page)) {
2527 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 continue;
Nick Piggina4b06722007-10-16 01:24:48 -07002529 }
2530 if (buffer_new(bh) || !buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002531 zero_user_segments(page, block_start, from,
2532 to, block_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 continue;
2534 }
Nick Piggina4b06722007-10-16 01:24:48 -07002535 if (buffer_uptodate(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 continue; /* reiserfs does this */
2537 if (block_start < from || block_end > to) {
Nick Piggina4b06722007-10-16 01:24:48 -07002538 lock_buffer(bh);
2539 bh->b_end_io = end_buffer_read_nobh;
2540 submit_bh(READ, bh);
2541 nr_reads++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 }
2543 }
2544
2545 if (nr_reads) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 /*
2547 * The page is locked, so these buffers are protected from
2548 * any VM or truncate activity. Hence we don't need to care
2549 * for the buffer_head refcounts.
2550 */
Nick Piggina4b06722007-10-16 01:24:48 -07002551 for (bh = head; bh; bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 wait_on_buffer(bh);
2553 if (!buffer_uptodate(bh))
2554 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 }
2556 if (ret)
2557 goto failed;
2558 }
2559
2560 if (is_mapped_to_disk)
2561 SetPageMappedToDisk(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562
Nick Piggin03158cd2007-10-16 01:25:25 -07002563 *fsdata = head; /* to be released by nobh_write_end */
Nick Piggina4b06722007-10-16 01:24:48 -07002564
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565 return 0;
2566
2567failed:
Nick Piggin03158cd2007-10-16 01:25:25 -07002568 BUG_ON(!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 /*
Nick Piggina4b06722007-10-16 01:24:48 -07002570 * Error recovery is a bit difficult. We need to zero out blocks that
2571 * were newly allocated, and dirty them to ensure they get written out.
2572 * Buffers need to be attached to the page at this point, otherwise
2573 * the handling of potential IO errors during writeout would be hard
2574 * (could try doing synchronous writeout, but what if that fails too?)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002576 attach_nobh_buffers(page, head);
2577 page_zero_new_buffers(page, from, to);
Nick Piggina4b06722007-10-16 01:24:48 -07002578
Nick Piggin03158cd2007-10-16 01:25:25 -07002579out_release:
2580 unlock_page(page);
2581 page_cache_release(page);
2582 *pagep = NULL;
Nick Piggina4b06722007-10-16 01:24:48 -07002583
npiggin@suse.de7bb46a62010-05-27 01:05:33 +10002584 return ret;
2585}
Nick Piggin03158cd2007-10-16 01:25:25 -07002586EXPORT_SYMBOL(nobh_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587
Nick Piggin03158cd2007-10-16 01:25:25 -07002588int nobh_write_end(struct file *file, struct address_space *mapping,
2589 loff_t pos, unsigned len, unsigned copied,
2590 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591{
2592 struct inode *inode = page->mapping->host;
Nick Pigginefdc3132007-10-21 06:57:41 +02002593 struct buffer_head *head = fsdata;
Nick Piggin03158cd2007-10-16 01:25:25 -07002594 struct buffer_head *bh;
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002595 BUG_ON(fsdata != NULL && page_has_buffers(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596
Dave Kleikampd4cf1092009-02-06 14:59:26 -06002597 if (unlikely(copied < len) && head)
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002598 attach_nobh_buffers(page, head);
2599 if (page_has_buffers(page))
2600 return generic_write_end(file, mapping, pos, len,
2601 copied, page, fsdata);
Nick Piggina4b06722007-10-16 01:24:48 -07002602
Nick Piggin22c8ca72007-02-20 13:58:09 -08002603 SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 set_page_dirty(page);
Nick Piggin03158cd2007-10-16 01:25:25 -07002605 if (pos+copied > inode->i_size) {
2606 i_size_write(inode, pos+copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 mark_inode_dirty(inode);
2608 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002609
2610 unlock_page(page);
2611 page_cache_release(page);
2612
Nick Piggin03158cd2007-10-16 01:25:25 -07002613 while (head) {
2614 bh = head;
2615 head = head->b_this_page;
2616 free_buffer_head(bh);
2617 }
2618
2619 return copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620}
Nick Piggin03158cd2007-10-16 01:25:25 -07002621EXPORT_SYMBOL(nobh_write_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622
2623/*
2624 * nobh_writepage() - based on block_write_full_page() except
2625 * that it tries to operate without attaching bufferheads to
2626 * the page.
2627 */
2628int nobh_writepage(struct page *page, get_block_t *get_block,
2629 struct writeback_control *wbc)
2630{
2631 struct inode * const inode = page->mapping->host;
2632 loff_t i_size = i_size_read(inode);
2633 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2634 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635 int ret;
2636
2637 /* Is the page fully inside i_size? */
2638 if (page->index < end_index)
2639 goto out;
2640
2641 /* Is the page fully outside i_size? (truncate in progress) */
2642 offset = i_size & (PAGE_CACHE_SIZE-1);
2643 if (page->index >= end_index+1 || !offset) {
2644 /*
2645 * The page may have dirty, unmapped buffers. For example,
2646 * they may have been added in ext3_writepage(). Make them
2647 * freeable here, so the page does not leak.
2648 */
2649#if 0
2650 /* Not really sure about this - do we need this ? */
2651 if (page->mapping->a_ops->invalidatepage)
2652 page->mapping->a_ops->invalidatepage(page, offset);
2653#endif
2654 unlock_page(page);
2655 return 0; /* don't care */
2656 }
2657
2658 /*
2659 * The page straddles i_size. It must be zeroed out on each and every
2660 * writepage invocation because it may be mmapped. "A file is mapped
2661 * in multiples of the page size. For a file that is not a multiple of
2662 * the page size, the remaining memory is zeroed when mapped, and
2663 * writes to that region are not written out to the file."
2664 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002665 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666out:
2667 ret = mpage_writepage(page, get_block, wbc);
2668 if (ret == -EAGAIN)
Chris Mason35c80d52009-04-15 13:22:38 -04002669 ret = __block_write_full_page(inode, page, get_block, wbc,
2670 end_buffer_async_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 return ret;
2672}
2673EXPORT_SYMBOL(nobh_writepage);
2674
Nick Piggin03158cd2007-10-16 01:25:25 -07002675int nobh_truncate_page(struct address_space *mapping,
2676 loff_t from, get_block_t *get_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2679 unsigned offset = from & (PAGE_CACHE_SIZE-1);
Nick Piggin03158cd2007-10-16 01:25:25 -07002680 unsigned blocksize;
2681 sector_t iblock;
2682 unsigned length, pos;
2683 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 struct page *page;
Nick Piggin03158cd2007-10-16 01:25:25 -07002685 struct buffer_head map_bh;
2686 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687
Nick Piggin03158cd2007-10-16 01:25:25 -07002688 blocksize = 1 << inode->i_blkbits;
2689 length = offset & (blocksize - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690
Nick Piggin03158cd2007-10-16 01:25:25 -07002691 /* Block boundary? Nothing to do */
2692 if (!length)
2693 return 0;
2694
2695 length = blocksize - length;
2696 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2697
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 page = grab_cache_page(mapping, index);
Nick Piggin03158cd2007-10-16 01:25:25 -07002699 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700 if (!page)
2701 goto out;
2702
Nick Piggin03158cd2007-10-16 01:25:25 -07002703 if (page_has_buffers(page)) {
2704has_buffers:
2705 unlock_page(page);
2706 page_cache_release(page);
2707 return block_truncate_page(mapping, from, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002709
2710 /* Find the buffer that contains "offset" */
2711 pos = blocksize;
2712 while (offset >= pos) {
2713 iblock++;
2714 pos += blocksize;
2715 }
2716
Theodore Ts'o460bcf52009-05-12 07:37:56 -04002717 map_bh.b_size = blocksize;
2718 map_bh.b_state = 0;
Nick Piggin03158cd2007-10-16 01:25:25 -07002719 err = get_block(inode, iblock, &map_bh, 0);
2720 if (err)
2721 goto unlock;
2722 /* unmapped? It's a hole - nothing to do */
2723 if (!buffer_mapped(&map_bh))
2724 goto unlock;
2725
2726 /* Ok, it's mapped. Make sure it's up-to-date */
2727 if (!PageUptodate(page)) {
2728 err = mapping->a_ops->readpage(NULL, page);
2729 if (err) {
2730 page_cache_release(page);
2731 goto out;
2732 }
2733 lock_page(page);
2734 if (!PageUptodate(page)) {
2735 err = -EIO;
2736 goto unlock;
2737 }
2738 if (page_has_buffers(page))
2739 goto has_buffers;
2740 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002741 zero_user(page, offset, length);
Nick Piggin03158cd2007-10-16 01:25:25 -07002742 set_page_dirty(page);
2743 err = 0;
2744
2745unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 unlock_page(page);
2747 page_cache_release(page);
2748out:
Nick Piggin03158cd2007-10-16 01:25:25 -07002749 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750}
2751EXPORT_SYMBOL(nobh_truncate_page);
2752
2753int block_truncate_page(struct address_space *mapping,
2754 loff_t from, get_block_t *get_block)
2755{
2756 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2757 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2758 unsigned blocksize;
Andrew Morton54b21a72006-01-08 01:03:05 -08002759 sector_t iblock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 unsigned length, pos;
2761 struct inode *inode = mapping->host;
2762 struct page *page;
2763 struct buffer_head *bh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 int err;
2765
2766 blocksize = 1 << inode->i_blkbits;
2767 length = offset & (blocksize - 1);
2768
2769 /* Block boundary? Nothing to do */
2770 if (!length)
2771 return 0;
2772
2773 length = blocksize - length;
Andrew Morton54b21a72006-01-08 01:03:05 -08002774 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775
2776 page = grab_cache_page(mapping, index);
2777 err = -ENOMEM;
2778 if (!page)
2779 goto out;
2780
2781 if (!page_has_buffers(page))
2782 create_empty_buffers(page, blocksize, 0);
2783
2784 /* Find the buffer that contains "offset" */
2785 bh = page_buffers(page);
2786 pos = blocksize;
2787 while (offset >= pos) {
2788 bh = bh->b_this_page;
2789 iblock++;
2790 pos += blocksize;
2791 }
2792
2793 err = 0;
2794 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002795 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 err = get_block(inode, iblock, bh, 0);
2797 if (err)
2798 goto unlock;
2799 /* unmapped? It's a hole - nothing to do */
2800 if (!buffer_mapped(bh))
2801 goto unlock;
2802 }
2803
2804 /* Ok, it's mapped. Make sure it's up-to-date */
2805 if (PageUptodate(page))
2806 set_buffer_uptodate(bh);
2807
David Chinner33a266d2007-02-12 00:51:41 -08002808 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 err = -EIO;
2810 ll_rw_block(READ, 1, &bh);
2811 wait_on_buffer(bh);
2812 /* Uhhuh. Read error. Complain and punt. */
2813 if (!buffer_uptodate(bh))
2814 goto unlock;
2815 }
2816
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002817 zero_user(page, offset, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 mark_buffer_dirty(bh);
2819 err = 0;
2820
2821unlock:
2822 unlock_page(page);
2823 page_cache_release(page);
2824out:
2825 return err;
2826}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002827EXPORT_SYMBOL(block_truncate_page);
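/*
 * Example (sketch only): in a shrinking truncate this is the helper that
 * zeroes the tail of the new last block so stale data is not exposed.
 * examplefs_shrink() and examplefs_truncate_blocks() are hypothetical.
 */
static int examplefs_shrink(struct inode *inode, loff_t newsize)
{
        int err;

        err = block_truncate_page(inode->i_mapping, newsize,
                                  examplefs_get_block);
        if (err)
                return err;
        truncate_setsize(inode, newsize);
        examplefs_truncate_blocks(inode, newsize);      /* free the on-disk blocks */
        return 0;
}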
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828
2829/*
2830 * The generic ->writepage function for buffer-backed address_spaces
Chris Mason35c80d52009-04-15 13:22:38 -04002831 * this form passes in the end_io handler used to finish the IO.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 */
Chris Mason35c80d52009-04-15 13:22:38 -04002833int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2834 struct writeback_control *wbc, bh_end_io_t *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835{
2836 struct inode * const inode = page->mapping->host;
2837 loff_t i_size = i_size_read(inode);
2838 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2839 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840
2841 /* Is the page fully inside i_size? */
2842 if (page->index < end_index)
Chris Mason35c80d52009-04-15 13:22:38 -04002843 return __block_write_full_page(inode, page, get_block, wbc,
2844 handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845
2846 /* Is the page fully outside i_size? (truncate in progress) */
2847 offset = i_size & (PAGE_CACHE_SIZE-1);
2848 if (page->index >= end_index+1 || !offset) {
2849 /*
2850 * The page may have dirty, unmapped buffers. For example,
2851 * they may have been added in ext3_writepage(). Make them
2852 * freeable here, so the page does not leak.
2853 */
Jan Karaaaa40592005-10-30 15:00:16 -08002854 do_invalidatepage(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 unlock_page(page);
2856 return 0; /* don't care */
2857 }
2858
2859 /*
2860 * The page straddles i_size. It must be zeroed out on each and every
Adam Buchbinder2a61aa42009-12-11 16:35:40 -05002861 * writepage invocation because it may be mmapped. "A file is mapped
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 * in multiples of the page size. For a file that is not a multiple of
2863 * the page size, the remaining memory is zeroed when mapped, and
2864 * writes to that region are not written out to the file."
2865 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002866 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Chris Mason35c80d52009-04-15 13:22:38 -04002867 return __block_write_full_page(inode, page, get_block, wbc, handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002869EXPORT_SYMBOL(block_write_full_page_endio);
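
/*
 * Illustrative sketch, not part of the original file: a caller that wants to
 * hook write completion can pass its own bh_end_io_t to
 * block_write_full_page_endio() and chain to end_buffer_async_write().
 * example_end_write and the reuse of example_get_block are assumptions made
 * for this sketch only.
 */
static void example_end_write(struct buffer_head *bh, int uptodate)
{
	/* filesystem-specific completion bookkeeping would go here */
	end_buffer_async_write(bh, uptodate);
}

static int example_writepage_endio(struct page *page,
				   struct writeback_control *wbc)
{
	return block_write_full_page_endio(page, example_get_block, wbc,
					   example_end_write);
}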
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870
Chris Mason35c80d52009-04-15 13:22:38 -04002871/*
2872 * The generic ->writepage function for buffer-backed address_spaces
2873 */
2874int block_write_full_page(struct page *page, get_block_t *get_block,
2875 struct writeback_control *wbc)
2876{
2877 return block_write_full_page_endio(page, get_block, wbc,
2878 end_buffer_async_write);
2879}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002880EXPORT_SYMBOL(block_write_full_page);
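
/*
 * Illustrative sketch, not part of the original file: the usual shape of a
 * buffer-backed filesystem's ->writepage method, which simply delegates to
 * block_write_full_page() with the filesystem's get_block callback (here
 * the hypothetical example_get_block from the earlier sketch).
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}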
Chris Mason35c80d52009-04-15 13:22:38 -04002881
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2883 get_block_t *get_block)
2884{
2885 struct buffer_head tmp;
2886 struct inode *inode = mapping->host;
2887 tmp.b_state = 0;
2888 tmp.b_blocknr = 0;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002889 tmp.b_size = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 get_block(inode, block, &tmp, 0);
2891 return tmp.b_blocknr;
2892}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002893EXPORT_SYMBOL(generic_block_bmap);
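
/*
 * Illustrative sketch, not part of the original file: a typical ->bmap
 * address_space operation built on generic_block_bmap(), again using the
 * hypothetical example_get_block.
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}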
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894
NeilBrown6712ecf2007-09-27 12:47:43 +02002895static void end_bio_bh_io_sync(struct bio *bio, int err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896{
2897 struct buffer_head *bh = bio->bi_private;
2898
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 if (err == -EOPNOTSUPP) {
2900 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901 }
2902
Keith Mannthey08bafc02008-11-25 10:24:35 +01002903 if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2904 set_bit(BH_Quiet, &bh->b_state);
2905
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2907 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908}
2909
2910int submit_bh(int rw, struct buffer_head * bh)
2911{
2912 struct bio *bio;
2913 int ret = 0;
2914
2915 BUG_ON(!buffer_locked(bh));
2916 BUG_ON(!buffer_mapped(bh));
2917 BUG_ON(!bh->b_end_io);
Aneesh Kumar K.V8fb0e342009-05-12 16:22:37 -04002918 BUG_ON(buffer_delay(bh));
2919 BUG_ON(buffer_unwritten(bh));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920
Jens Axboe48fd4f92008-08-22 10:00:36 +02002921 /*
Jens Axboe48fd4f92008-08-22 10:00:36 +02002922 * Only clear out a write error when rewriting
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923 */
Jens Axboe48fd4f92008-08-22 10:00:36 +02002924 if (test_set_buffer_req(bh) && (rw & WRITE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 clear_buffer_write_io_error(bh);
2926
2927 /*
2928 * from here on down, it's all bio -- do the initial mapping,
2929 * submit_bio -> generic_make_request may further map this bio around
2930 */
2931 bio = bio_alloc(GFP_NOIO, 1);
2932
2933 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2934 bio->bi_bdev = bh->b_bdev;
2935 bio->bi_io_vec[0].bv_page = bh->b_page;
2936 bio->bi_io_vec[0].bv_len = bh->b_size;
2937 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2938
2939 bio->bi_vcnt = 1;
2940 bio->bi_idx = 0;
2941 bio->bi_size = bh->b_size;
2942
2943 bio->bi_end_io = end_bio_bh_io_sync;
2944 bio->bi_private = bh;
2945
2946 bio_get(bio);
2947 submit_bio(rw, bio);
2948
2949 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2950 ret = -EOPNOTSUPP;
2951
2952 bio_put(bio);
2953 return ret;
2954}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07002955EXPORT_SYMBOL(submit_bh);
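
/*
 * Illustrative sketch, not part of the original file: a synchronous read of
 * one already-mapped buffer via submit_bh(), mirroring the pattern used by
 * ll_rw_block() and bh_submit_read() below.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}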
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956
2957/**
2958 * ll_rw_block: low-level access to block devices (DEPRECATED)
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002959 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 * @nr: number of &struct buffer_heads in the array
2961 * @bhs: array of pointers to &struct buffer_head
2962 *
Jan Karaa7662232005-09-06 15:19:10 -07002963 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2964 * requests an I/O operation on them, either a %READ or a %WRITE. The third
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002965 * %READA option is described in the documentation for generic_make_request()
2966 * which ll_rw_block() calls.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 *
2968 * This function drops any buffer that it cannot get a lock on (with the
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002969 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2970 * request, and any buffer that appears to be up-to-date when doing a read
2971 * request. Further, it marks as clean any buffers that are processed for
2972 * writing (the buffer cache won't assume that they are actually clean
2973 * until the buffer gets unlocked).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 *
2975 * ll_rw_block sets b_end_io to a simple completion handler that marks
2976 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2977 * any waiters.
2978 *
2979 * All of the buffers must be for the same device, and must also be a
2980 * multiple of the current approved size for the device.
2981 */
2982void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2983{
2984 int i;
2985
2986 for (i = 0; i < nr; i++) {
2987 struct buffer_head *bh = bhs[i];
2988
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002989 if (!trylock_buffer(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 continue;
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002991 if (rw == WRITE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 if (test_clear_buffer_dirty(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07002993 bh->b_end_io = end_buffer_write_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08002994 get_bh(bh);
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02002995 submit_bh(WRITE, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 continue;
2997 }
2998 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 if (!buffer_uptodate(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07003000 bh->b_end_io = end_buffer_read_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08003001 get_bh(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 submit_bh(rw, bh);
3003 continue;
3004 }
3005 }
3006 unlock_buffer(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007 }
3008}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07003009EXPORT_SYMBOL(ll_rw_block);
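
/*
 * Illustrative sketch, not part of the original file: the common
 * "start I/O on a batch, then wait" sequence that ll_rw_block() is
 * typically used for.
 */
static int example_read_batch(struct buffer_head *bhs[], int nr)
{
	int i;
	int err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}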
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010
Christoph Hellwig9cb569d2010-08-11 17:06:24 +02003011void write_dirty_buffer(struct buffer_head *bh, int rw)
3012{
3013 lock_buffer(bh);
3014 if (!test_clear_buffer_dirty(bh)) {
3015 unlock_buffer(bh);
3016 return;
3017 }
3018 bh->b_end_io = end_buffer_write_sync;
3019 get_bh(bh);
3020 submit_bh(rw, bh);
3021}
3022EXPORT_SYMBOL(write_dirty_buffer);
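
/*
 * Illustrative sketch, not part of the original file: fire-and-forget
 * writeback of one dirty buffer.  Unlike __sync_dirty_buffer() below, the
 * caller neither waits for nor learns the result of the I/O.
 */
static void example_start_write(struct buffer_head *bh)
{
	write_dirty_buffer(bh, WRITE);	/* does nothing if bh is already clean */
}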
3023
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024/*
3025 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3026 * and then start new I/O and then wait upon it. The caller must have a ref on
3027 * the buffer_head.
3028 */
Christoph Hellwig87e99512010-08-11 17:05:45 +02003029int __sync_dirty_buffer(struct buffer_head *bh, int rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030{
3031 int ret = 0;
3032
3033 WARN_ON(atomic_read(&bh->b_count) < 1);
3034 lock_buffer(bh);
3035 if (test_clear_buffer_dirty(bh)) {
3036 get_bh(bh);
3037 bh->b_end_io = end_buffer_write_sync;
Christoph Hellwig87e99512010-08-11 17:05:45 +02003038 ret = submit_bh(rw, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 wait_on_buffer(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 if (!ret && !buffer_uptodate(bh))
3041 ret = -EIO;
3042 } else {
3043 unlock_buffer(bh);
3044 }
3045 return ret;
3046}
Christoph Hellwig87e99512010-08-11 17:05:45 +02003047EXPORT_SYMBOL(__sync_dirty_buffer);
3048
3049int sync_dirty_buffer(struct buffer_head *bh)
3050{
3051 return __sync_dirty_buffer(bh, WRITE_SYNC);
3052}
H Hartley Sweeten1fe72ea2009-09-22 16:43:51 -07003053EXPORT_SYMBOL(sync_dirty_buffer);
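
/*
 * Illustrative sketch, not part of the original file: the typical
 * "modify, dirty, then force to disk" sequence for a metadata buffer,
 * much as a superblock update would do.
 */
static int example_update_and_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	/* ... modify the block contents via bh->b_data here ... */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* waits; returns -EIO on write error */
}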
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054
3055/*
3056 * try_to_free_buffers() checks if all the buffers on this particular page
3057 * are unused, and releases them if so.
3058 *
3059 * Exclusion against try_to_free_buffers may be obtained by either
3060 * locking the page or by holding its mapping's private_lock.
3061 *
3062 * If the page is dirty but all the buffers are clean then we need to
3063 * be sure to mark the page clean as well. This is because the page
3064 * may be against a block device, and a later reattachment of buffers
3065 * to a dirty page will set *all* buffers dirty, which would corrupt
3066 * filesystem data on the same device.
3067 *
3068 * The same applies to regular filesystem pages: if all the buffers are
3069 * clean then we set the page clean and proceed. To do that, we require
3070 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3071 * private_lock.
3072 *
3073 * try_to_free_buffers() is non-blocking.
3074 */
3075static inline int buffer_busy(struct buffer_head *bh)
3076{
3077 return atomic_read(&bh->b_count) |
3078 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3079}
3080
3081static int
3082drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3083{
3084 struct buffer_head *head = page_buffers(page);
3085 struct buffer_head *bh;
3086
3087 bh = head;
3088 do {
akpm@osdl.orgde7d5a32005-05-01 08:58:39 -07003089 if (buffer_write_io_error(bh) && page->mapping)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 set_bit(AS_EIO, &page->mapping->flags);
3091 if (buffer_busy(bh))
3092 goto failed;
3093 bh = bh->b_this_page;
3094 } while (bh != head);
3095
3096 do {
3097 struct buffer_head *next = bh->b_this_page;
3098
Jan Kara535ee2f2008-02-08 04:21:59 -08003099 if (bh->b_assoc_map)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 __remove_assoc_queue(bh);
3101 bh = next;
3102 } while (bh != head);
3103 *buffers_to_free = head;
3104 __clear_page_buffers(page);
3105 return 1;
3106failed:
3107 return 0;
3108}
3109
3110int try_to_free_buffers(struct page *page)
3111{
3112 struct address_space * const mapping = page->mapping;
3113 struct buffer_head *buffers_to_free = NULL;
3114 int ret = 0;
3115
3116 BUG_ON(!PageLocked(page));
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003117 if (PageWriteback(page))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118 return 0;
3119
3120 if (mapping == NULL) { /* can this still happen? */
3121 ret = drop_buffers(page, &buffers_to_free);
3122 goto out;
3123 }
3124
3125 spin_lock(&mapping->private_lock);
3126 ret = drop_buffers(page, &buffers_to_free);
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003127
3128 /*
3129 * If the filesystem writes its buffers by hand (eg ext3)
3130 * then we can have clean buffers against a dirty page. We
3131 * clean the page here; otherwise the VM will never notice
3132 * that the filesystem did any IO at all.
3133 *
3134 * Also, during truncate, discard_buffer will have marked all
3135 * the page's buffers clean. We discover that here and clean
3136 * the page also.
Nick Piggin87df7242007-01-30 14:36:27 +11003137 *
3138 * private_lock must be held over this entire operation in order
3139 * to synchronise against __set_page_dirty_buffers and prevent the
3140 * dirty bit from being lost.
Linus Torvaldsecdfc972007-01-26 12:47:06 -08003141 */
3142 if (ret)
3143 cancel_dirty_page(page, PAGE_CACHE_SIZE);
Nick Piggin87df7242007-01-30 14:36:27 +11003144 spin_unlock(&mapping->private_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145out:
3146 if (buffers_to_free) {
3147 struct buffer_head *bh = buffers_to_free;
3148
3149 do {
3150 struct buffer_head *next = bh->b_this_page;
3151 free_buffer_head(bh);
3152 bh = next;
3153 } while (bh != buffers_to_free);
3154 }
3155 return ret;
3156}
3157EXPORT_SYMBOL(try_to_free_buffers);
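
/*
 * Illustrative sketch, not part of the original file: when a filesystem
 * supplies no ->releasepage method, try_to_release_page() falls back to
 * try_to_free_buffers(); an explicit method with the same behaviour would
 * look like this (example_releasepage is hypothetical).
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}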
3158
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159/*
3160 * There are no bdflush tunables left. But distributions are
3161 * still running obsolete flush daemons, so we terminate them here.
3162 *
3163 * Use of bdflush() is deprecated and will be removed in a future kernel.
Jens Axboe5b0830c2009-09-23 19:37:09 +02003164 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 */
Heiko Carstensbdc480e2009-01-14 14:14:12 +01003166SYSCALL_DEFINE2(bdflush, int, func, long, data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167{
3168 static int msg_count;
3169
3170 if (!capable(CAP_SYS_ADMIN))
3171 return -EPERM;
3172
3173 if (msg_count < 5) {
3174 msg_count++;
3175 printk(KERN_INFO
3176 "warning: process `%s' used the obsolete bdflush"
3177 " system call\n", current->comm);
3178 printk(KERN_INFO "Fix your initscripts?\n");
3179 }
3180
3181 if (func == 1)
3182 do_exit(0);
3183 return 0;
3184}
3185
3186/*
3187 * Buffer-head allocation
3188 */
Christoph Lametere18b8902006-12-06 20:33:20 -08003189static struct kmem_cache *bh_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190
3191/*
3192 * Once the number of bh's in the machine exceeds this level, we start
3193 * stripping them in writeback.
3194 */
3195static int max_buffer_heads;
3196
3197int buffer_heads_over_limit;
3198
3199struct bh_accounting {
3200 int nr; /* Number of live bh's */
3201 int ratelimit; /* Limit cacheline bouncing */
3202};
3203
3204static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3205
3206static void recalc_bh_state(void)
3207{
3208 int i;
3209 int tot = 0;
3210
Christoph Lameteree1be862010-12-06 11:40:05 -06003211 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212 return;
Christoph Lameterc7b92512010-12-06 11:16:28 -06003213 __this_cpu_write(bh_accounting.ratelimit, 0);
Eric Dumazet8a143422006-03-24 03:18:10 -08003214 for_each_online_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 tot += per_cpu(bh_accounting, i).nr;
3216 buffer_heads_over_limit = (tot > max_buffer_heads);
3217}
Christoph Lameterc7b92512010-12-06 11:16:28 -06003218
Al Virodd0fc662005-10-07 07:46:04 +01003219struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220{
Richard Kennedy019b4d12010-03-10 15:20:33 -08003221 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222 if (ret) {
Christoph Lametera35afb82007-05-16 22:10:57 -07003223 INIT_LIST_HEAD(&ret->b_assoc_buffers);
Christoph Lameterc7b92512010-12-06 11:16:28 -06003224 preempt_disable();
3225 __this_cpu_inc(bh_accounting.nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226 recalc_bh_state();
Christoph Lameterc7b92512010-12-06 11:16:28 -06003227 preempt_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 }
3229 return ret;
3230}
3231EXPORT_SYMBOL(alloc_buffer_head);
3232
3233void free_buffer_head(struct buffer_head *bh)
3234{
3235 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3236 kmem_cache_free(bh_cachep, bh);
Christoph Lameterc7b92512010-12-06 11:16:28 -06003237 preempt_disable();
3238 __this_cpu_dec(bh_accounting.nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 recalc_bh_state();
Christoph Lameterc7b92512010-12-06 11:16:28 -06003240 preempt_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241}
3242EXPORT_SYMBOL(free_buffer_head);
3243
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244static void buffer_exit_cpu(int cpu)
3245{
3246 int i;
3247 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3248
3249 for (i = 0; i < BH_LRU_SIZE; i++) {
3250 brelse(b->bhs[i]);
3251 b->bhs[i] = NULL;
3252 }
Christoph Lameterc7b92512010-12-06 11:16:28 -06003253 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
Eric Dumazet8a143422006-03-24 03:18:10 -08003254 per_cpu(bh_accounting, cpu).nr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255}
3256
3257static int buffer_cpu_notify(struct notifier_block *self,
3258 unsigned long action, void *hcpu)
3259{
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003260 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261 buffer_exit_cpu((unsigned long)hcpu);
3262 return NOTIFY_OK;
3263}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003265/**
Randy Dunlapa6b91912008-03-19 17:01:00 -07003266 * bh_uptodate_or_lock - Test whether the buffer is uptodate
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003267 * @bh: struct buffer_head
3268 *
3269 * Return true if the buffer is up-to-date; otherwise return false
3270 * with the buffer locked.
3271 */
3272int bh_uptodate_or_lock(struct buffer_head *bh)
3273{
3274 if (!buffer_uptodate(bh)) {
3275 lock_buffer(bh);
3276 if (!buffer_uptodate(bh))
3277 return 0;
3278 unlock_buffer(bh);
3279 }
3280 return 1;
3281}
3282EXPORT_SYMBOL(bh_uptodate_or_lock);
3283
3284/**
Randy Dunlapa6b91912008-03-19 17:01:00 -07003285 * bh_submit_read - Submit a locked buffer for reading
Aneesh Kumar K.V389d1b02008-01-28 23:58:26 -05003286 * @bh: struct buffer_head
3287 *
3288 * Returns zero on success and -EIO on error.
3289 */
3290int bh_submit_read(struct buffer_head *bh)
3291{
3292 BUG_ON(!buffer_locked(bh));
3293
3294 if (buffer_uptodate(bh)) {
3295 unlock_buffer(bh);
3296 return 0;
3297 }
3298
3299 get_bh(bh);
3300 bh->b_end_io = end_buffer_read_sync;
3301 submit_bh(READ, bh);
3302 wait_on_buffer(bh);
3303 if (buffer_uptodate(bh))
3304 return 0;
3305 return -EIO;
3306}
3307EXPORT_SYMBOL(bh_submit_read);
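
/*
 * Illustrative sketch, not part of the original file: the intended pairing
 * of bh_uptodate_or_lock() and bh_submit_read() - issue and wait for a read
 * only when the buffer is not already up to date.
 */
static int example_read_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate */
	return bh_submit_read(bh);	/* bh is now locked; submits and waits */
}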
3308
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309void __init buffer_init(void)
3310{
3311 int nrpages;
3312
Christoph Lameterb98938c2008-02-04 22:28:36 -08003313 bh_cachep = kmem_cache_create("buffer_head",
3314 sizeof(struct buffer_head), 0,
3315 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3316 SLAB_MEM_SPREAD),
Richard Kennedy019b4d12010-03-10 15:20:33 -08003317 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318
3319 /*
3320 * Limit the bh occupancy to 10% of ZONE_NORMAL
3321 */
3322 nrpages = (nr_free_buffer_pages() * 10) / 100;
3323 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3324 hotcpu_notifier(buffer_cpu_notify, 0);
3325}