/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty
   buffers we could generate corruption also on the next media inserted;
   thus a parameter is necessary to handle this case in the safest way
   possible (trying not to corrupt the newly inserted disk with data
   belonging to the old, now corrupted, disk). Also for the ramdisk the
   natural thing to do in order to release the ramdisk memory is to
   destroy dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_pdflush(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
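
/*
 * A minimal sketch (not part of the original file) of the O_SYNC pattern the
 * comment above describes: queue each write as the buffer is dirtied, then
 * wait once at the end.  The blocknr/data names are assumptions.
 *
 *	struct buffer_head *bh = sb_getblk(sb, blocknr);
 *
 *	memcpy(bh->b_data, data, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);	// queue the write immediately
 *	brelse(bh);
 *	...
 *	err = osync_buffers_list(lock, list);	// wait on what was queued
 */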

void do_thaw_all(unsigned long unused)
{
	struct super_block *sb;
	char b[BDEVNAME_SIZE];

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
			printk(KERN_WARNING "Emergency Thaw on %s\n",
			       bdevname(sb->s_bdev, b));
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	pdflush_operation(do_thaw_all, 0);
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
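
/*
 * A minimal sketch (not part of the original file) of how a filesystem's
 * fsync method might use this; example_fsync and any surrounding write-out
 * of the inode itself are assumed, not prescribed:
 *
 *	static int example_fsync(struct file *file, struct dentry *dentry,
 *				 int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *
 *		// writes and waits on the blockdev buffers queued on
 *		// inode->i_mapping->private_list (e.g. indirect blocks)
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */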

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
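
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * needs an indirect block flushed by a later fsync() dirties it against the
 * owning inode; indirect_blocknr is an assumed name:
 *
 *	struct buffer_head *bh = sb_getblk(inode->i_sb, indirect_blocknr);
 *
 *	// ... modify bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);	// queue on ->private_list
 *	brelse(bh);
 *
 * A later sync_mapping_buffers(inode->i_mapping) then writes and waits on it.
 */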

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then, when they are later attached, they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		if (!TestSetPageDirty(page))
			__set_page_dirty(page, page_mapping(page), 0);
	}
}

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);
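
/*
 * Illustrative sketch (not part of the original file): since __getblk()
 * says nothing about the on-disk contents, a caller that will overwrite the
 * whole block can skip reading it; bdev/block/blocksize are assumed names:
 *
 *	struct buffer_head *bh = __getblk(bdev, block, blocksize);
 *
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);	// contents are now valid in memory
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */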

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns buffer head that contains it.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
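
/*
 * Illustrative sketch (not part of the original file): the common read path,
 * via the sb_bread() wrapper most filesystems use; blocknr is assumed:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;	// block was unreadable
 *	// ... examine bh->b_data ...
 *	brelse(bh);
 */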

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);
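
/*
 * Illustrative sketch (not part of the original file): filesystems normally
 * wire block_invalidatepage() into their address_space_operations; the
 * example_* names are assumed:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */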

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
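
/*
 * Illustrative sketch (not part of the original file): writepage-style code
 * typically attaches buffers on demand before walking them:
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, blocksize, 0);
 *	head = page_buffers(page);
 *
 * Compare __block_write_full_page() below, which passes initial BH_Dirty and
 * BH_Uptodate state bits instead of 0.
 */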
1531
1532/*
1533 * We are taking a block for data and we don't want any output from any
1534 * buffer-cache aliases starting from return from that function and
1535 * until the moment when something will explicitly mark the buffer
1536 * dirty (hopefully that will not happen until we will free that block ;-)
1537 * We don't even need to mark it not-uptodate - nobody can expect
1538 * anything from a newly allocated buffer anyway. We used to used
1539 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1540 * don't want to mark the alias unmapped, for example - it would confuse
1541 * anyone who might pick it with bread() afterwards...
1542 *
1543 * Also.. Note that bforget() doesn't lock the buffer. So there can
1544 * be writeout I/O going on against recently-freed buffers. We don't
1545 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1546 * only if we really need to. That happens here.
1547 */
1548void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1549{
1550 struct buffer_head *old_bh;
1551
1552 might_sleep();
1553
Coywolf Qi Hunt385fd4c2005-11-07 00:59:39 -08001554 old_bh = __find_get_block_slow(bdev, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 if (old_bh) {
1556 clear_buffer_dirty(old_bh);
1557 wait_on_buffer(old_bh);
1558 clear_buffer_req(old_bh);
1559 __brelse(old_bh);
1560 }
1561}
1562EXPORT_SYMBOL(unmap_underlying_metadata);
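/*
 * Typical call site (mirrors __block_write_full_page() below): after
 * get_block() has just allocated a block, kill any stale alias that the
 * blockdev mapping may still hold for it:
 *
 *	if (buffer_new(bh))
 *		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
 */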
1563
1564/*
1565 * NOTE! All mapped/uptodate combinations are valid:
1566 *
1567 * Mapped Uptodate Meaning
1568 *
1569 * No No "unknown" - must do get_block()
1570 * No Yes "hole" - zero-filled
1571 * Yes No "allocated" - allocated on disk, not read in
1572 * Yes Yes "valid" - allocated and up-to-date in memory.
1573 *
1574 * "Dirty" is valid only with the last case (mapped+uptodate).
1575 */
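/*
 * A minimal decoding of the table above (illustrative sketch only;
 * bh_describe() is hypothetical and not part of this file):
 *
 *	static const char *bh_describe(struct buffer_head *bh)
 *	{
 *		if (!buffer_mapped(bh))
 *			return buffer_uptodate(bh) ? "hole" : "unknown";
 *		return buffer_uptodate(bh) ? "valid" : "allocated";
 *	}
 */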
1576
1577/*
1578 * While block_write_full_page is writing back the dirty buffers under
1579 * the page lock, whoever dirtied the buffers may decide to clean them
1580 * again at any time. We handle that by only looking at the buffer
1581 * state inside lock_buffer().
1582 *
1583 * If block_write_full_page() is called for regular writeback
1584 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1585 * locked buffer. This only can happen if someone has written the buffer
1586 * directly, with submit_bh(). At the address_space level PageWriteback
1587 * prevents this contention from occurring.
1588 */
1589static int __block_write_full_page(struct inode *inode, struct page *page,
1590 get_block_t *get_block, struct writeback_control *wbc)
1591{
1592 int err;
1593 sector_t block;
1594 sector_t last_block;
Andrew Mortonf0fbd5f2005-05-05 16:15:48 -07001595 struct buffer_head *bh, *head;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001596 const unsigned blocksize = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 int nr_underway = 0;
1598
1599 BUG_ON(!PageLocked(page));
1600
1601 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1602
1603 if (!page_has_buffers(page)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001604 create_empty_buffers(page, blocksize,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 (1 << BH_Dirty)|(1 << BH_Uptodate));
1606 }
1607
1608 /*
1609 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1610 * here, and the (potentially unmapped) buffers may become dirty at
1611 * any time. If a buffer becomes dirty here after we've inspected it
1612 * then we just miss that fact, and the page stays dirty.
1613 *
1614 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1615 * handle that here by just cleaning them.
1616 */
1617
Andrew Morton54b21a72006-01-08 01:03:05 -08001618 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 head = page_buffers(page);
1620 bh = head;
1621
1622 /*
1623 * Get all the dirty buffers mapped to disk addresses and
1624 * handle any aliases from the underlying blockdev's mapping.
1625 */
1626 do {
1627 if (block > last_block) {
1628 /*
1629 * mapped buffers outside i_size will occur, because
1630 * this page can be outside i_size when there is a
1631 * truncate in progress.
1632 */
1633 /*
1634 * The buffer was zeroed by block_write_full_page()
1635 */
1636 clear_buffer_dirty(bh);
1637 set_buffer_uptodate(bh);
Alex Tomas29a814d2008-07-11 19:27:31 -04001638 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1639 buffer_dirty(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001640 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 err = get_block(inode, block, bh, 1);
1642 if (err)
1643 goto recover;
Alex Tomas29a814d2008-07-11 19:27:31 -04001644 clear_buffer_delay(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 if (buffer_new(bh)) {
1646 /* blockdev mappings never come here */
1647 clear_buffer_new(bh);
1648 unmap_underlying_metadata(bh->b_bdev,
1649 bh->b_blocknr);
1650 }
1651 }
1652 bh = bh->b_this_page;
1653 block++;
1654 } while (bh != head);
1655
1656 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 if (!buffer_mapped(bh))
1658 continue;
1659 /*
1660 * If it's a fully non-blocking write attempt and we cannot
1661 * lock the buffer then redirty the page. Note that this can
1662 * potentially cause a busy-wait loop from pdflush and kswapd
1663 * activity, but those code paths have their own higher-level
1664 * throttling.
1665 */
1666 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1667 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02001668 } else if (!trylock_buffer(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 redirty_page_for_writepage(wbc, page);
1670 continue;
1671 }
1672 if (test_clear_buffer_dirty(bh)) {
1673 mark_buffer_async_write(bh);
1674 } else {
1675 unlock_buffer(bh);
1676 }
1677 } while ((bh = bh->b_this_page) != head);
1678
1679 /*
1680 * The page and its buffers are protected by PageWriteback(), so we can
1681 * drop the bh refcounts early.
1682 */
1683 BUG_ON(PageWriteback(page));
1684 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
1686 do {
1687 struct buffer_head *next = bh->b_this_page;
1688 if (buffer_async_write(bh)) {
1689 submit_bh(WRITE, bh);
1690 nr_underway++;
1691 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 bh = next;
1693 } while (bh != head);
Andrew Morton05937ba2005-05-05 16:15:47 -07001694 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
1696 err = 0;
1697done:
1698 if (nr_underway == 0) {
1699 /*
1700 * The page was marked dirty, but the buffers were
1701 * clean. Someone wrote them back by hand with
1702 * ll_rw_block/submit_bh. A rare case.
1703 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 end_page_writeback(page);
Nick Piggin3d67f2d2007-05-06 14:49:05 -07001705
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 /*
1707 * The page and buffer_heads can be released at any time from
1708 * here on.
1709 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 }
1711 return err;
1712
1713recover:
1714 /*
1715 * ENOSPC, or some other error. We may already have added some
1716 * blocks to the file, so we need to write these out to avoid
1717 * exposing stale data.
1718 * The page is currently locked and not marked for writeback
1719 */
1720 bh = head;
1721 /* Recovery: lock and submit the mapped buffers */
1722 do {
Alex Tomas29a814d2008-07-11 19:27:31 -04001723 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1724 !buffer_delay(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 lock_buffer(bh);
1726 mark_buffer_async_write(bh);
1727 } else {
1728 /*
1729 * The buffer may have been set dirty during
1730 * attachment to a dirty page.
1731 */
1732 clear_buffer_dirty(bh);
1733 }
1734 } while ((bh = bh->b_this_page) != head);
1735 SetPageError(page);
1736 BUG_ON(PageWriteback(page));
Andrew Morton7e4c3692007-05-08 00:23:27 -07001737 mapping_set_error(page->mapping, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 set_page_writeback(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 do {
1740 struct buffer_head *next = bh->b_this_page;
1741 if (buffer_async_write(bh)) {
1742 clear_buffer_dirty(bh);
1743 submit_bh(WRITE, bh);
1744 nr_underway++;
1745 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 bh = next;
1747 } while (bh != head);
Nick Pigginffda9d32007-02-20 13:57:54 -08001748 unlock_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 goto done;
1750}
1751
Nick Pigginafddba42007-10-16 01:25:01 -07001752/*
1753 * If a page has any new buffers, zero them out here, and mark them uptodate
1754 * and dirty so they'll be written out (in order to prevent uninitialised
1755 * block data from leaking). And clear the new bit.
1756 */
1757void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1758{
1759 unsigned int block_start, block_end;
1760 struct buffer_head *head, *bh;
1761
1762 BUG_ON(!PageLocked(page));
1763 if (!page_has_buffers(page))
1764 return;
1765
1766 bh = head = page_buffers(page);
1767 block_start = 0;
1768 do {
1769 block_end = block_start + bh->b_size;
1770
1771 if (buffer_new(bh)) {
1772 if (block_end > from && block_start < to) {
1773 if (!PageUptodate(page)) {
1774 unsigned start, size;
1775
1776 start = max(from, block_start);
1777 size = min(to, block_end) - start;
1778
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001779 zero_user(page, start, size);
Nick Pigginafddba42007-10-16 01:25:01 -07001780 set_buffer_uptodate(bh);
1781 }
1782
1783 clear_buffer_new(bh);
1784 mark_buffer_dirty(bh);
1785 }
1786 }
1787
1788 block_start = block_end;
1789 bh = bh->b_this_page;
1790 } while (bh != head);
1791}
1792EXPORT_SYMBOL(page_zero_new_buffers);
1793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794static int __block_prepare_write(struct inode *inode, struct page *page,
1795 unsigned from, unsigned to, get_block_t *get_block)
1796{
1797 unsigned block_start, block_end;
1798 sector_t block;
1799 int err = 0;
1800 unsigned blocksize, bbits;
1801 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1802
1803 BUG_ON(!PageLocked(page));
1804 BUG_ON(from > PAGE_CACHE_SIZE);
1805 BUG_ON(to > PAGE_CACHE_SIZE);
1806 BUG_ON(from > to);
1807
1808 blocksize = 1 << inode->i_blkbits;
1809 if (!page_has_buffers(page))
1810 create_empty_buffers(page, blocksize, 0);
1811 head = page_buffers(page);
1812
1813 bbits = inode->i_blkbits;
1814 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1815
1816 for(bh = head, block_start = 0; bh != head || !block_start;
1817 block++, block_start=block_end, bh = bh->b_this_page) {
1818 block_end = block_start + blocksize;
1819 if (block_end <= from || block_start >= to) {
1820 if (PageUptodate(page)) {
1821 if (!buffer_uptodate(bh))
1822 set_buffer_uptodate(bh);
1823 }
1824 continue;
1825 }
1826 if (buffer_new(bh))
1827 clear_buffer_new(bh);
1828 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08001829 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 err = get_block(inode, block, bh, 1);
1831 if (err)
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001832 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 if (buffer_new(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 unmap_underlying_metadata(bh->b_bdev,
1835 bh->b_blocknr);
1836 if (PageUptodate(page)) {
Nick Piggin637aff42007-10-16 01:25:00 -07001837 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 set_buffer_uptodate(bh);
Nick Piggin637aff42007-10-16 01:25:00 -07001839 mark_buffer_dirty(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 continue;
1841 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08001842 if (block_end > to || block_start < from)
1843 zero_user_segments(page,
1844 to, block_end,
1845 block_start, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 continue;
1847 }
1848 }
1849 if (PageUptodate(page)) {
1850 if (!buffer_uptodate(bh))
1851 set_buffer_uptodate(bh);
1852 continue;
1853 }
1854 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
David Chinner33a266d2007-02-12 00:51:41 -08001855 !buffer_unwritten(bh) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 (block_start < from || block_end > to)) {
1857 ll_rw_block(READ, 1, &bh);
1858 *wait_bh++=bh;
1859 }
1860 }
1861 /*
1862 * If we issued read requests - let them complete.
1863 */
1864 while(wait_bh > wait) {
1865 wait_on_buffer(*--wait_bh);
1866 if (!buffer_uptodate(*wait_bh))
Nick Pigginf3ddbdc2005-05-05 16:15:45 -07001867 err = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 }
Nick Pigginafddba42007-10-16 01:25:01 -07001869 if (unlikely(err))
1870 page_zero_new_buffers(page, from, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 return err;
1872}
1873
1874static int __block_commit_write(struct inode *inode, struct page *page,
1875 unsigned from, unsigned to)
1876{
1877 unsigned block_start, block_end;
1878 int partial = 0;
1879 unsigned blocksize;
1880 struct buffer_head *bh, *head;
1881
1882 blocksize = 1 << inode->i_blkbits;
1883
1884 for(bh = head = page_buffers(page), block_start = 0;
1885 bh != head || !block_start;
1886 block_start=block_end, bh = bh->b_this_page) {
1887 block_end = block_start + blocksize;
1888 if (block_end <= from || block_start >= to) {
1889 if (!buffer_uptodate(bh))
1890 partial = 1;
1891 } else {
1892 set_buffer_uptodate(bh);
1893 mark_buffer_dirty(bh);
1894 }
Nick Pigginafddba42007-10-16 01:25:01 -07001895 clear_buffer_new(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 }
1897
1898 /*
1899 * If this is a partial write which happened to make all buffers
1900 * uptodate then we can optimize away a bogus readpage() for
1901 * the next read(). Here we 'discover' whether the page went
1902 * uptodate as a result of this (potentially partial) write.
1903 */
1904 if (!partial)
1905 SetPageUptodate(page);
1906 return 0;
1907}
1908
1909/*
Nick Pigginafddba42007-10-16 01:25:01 -07001910 * block_write_begin takes care of the basic task of block allocation and
1911 * bringing partial write blocks uptodate first.
1912 *
1913 * If *pagep is not NULL, then block_write_begin uses the locked page
1914 * at *pagep rather than allocating its own. In this case, the page will
1915 * not be unlocked or deallocated on failure.
1916 */
1917int block_write_begin(struct file *file, struct address_space *mapping,
1918 loff_t pos, unsigned len, unsigned flags,
1919 struct page **pagep, void **fsdata,
1920 get_block_t *get_block)
1921{
1922 struct inode *inode = mapping->host;
1923 int status = 0;
1924 struct page *page;
1925 pgoff_t index;
1926 unsigned start, end;
1927 int ownpage = 0;
1928
1929 index = pos >> PAGE_CACHE_SHIFT;
1930 start = pos & (PAGE_CACHE_SIZE - 1);
1931 end = start + len;
1932
1933 page = *pagep;
1934 if (page == NULL) {
1935 ownpage = 1;
Nick Piggin54566b22009-01-04 12:00:53 -08001936 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Pigginafddba42007-10-16 01:25:01 -07001937 if (!page) {
1938 status = -ENOMEM;
1939 goto out;
1940 }
1941 *pagep = page;
1942 } else
1943 BUG_ON(!PageLocked(page));
1944
1945 status = __block_prepare_write(inode, page, start, end, get_block);
1946 if (unlikely(status)) {
1947 ClearPageUptodate(page);
1948
1949 if (ownpage) {
1950 unlock_page(page);
1951 page_cache_release(page);
1952 *pagep = NULL;
1953
1954 /*
1955 * prepare_write() may have instantiated a few blocks
1956 * outside i_size. Trim these off again. Don't need
1957 * i_size_read because we hold i_mutex.
1958 */
1959 if (pos + len > inode->i_size)
1960 vmtruncate(inode, inode->i_size);
1961 }
Nick Pigginafddba42007-10-16 01:25:01 -07001962 }
1963
1964out:
1965 return status;
1966}
1967EXPORT_SYMBOL(block_write_begin);
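/*
 * Usage sketch (foo_fs and foo_get_block are hypothetical): a filesystem
 * normally wraps this in its ->write_begin() method, passing *pagep == NULL
 * so that block_write_begin() allocates and locks the page itself:
 *
 *	static int foo_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return block_write_begin(file, mapping, pos, len, flags,
 *					 pagep, fsdata, foo_get_block);
 *	}
 */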
1968
1969int block_write_end(struct file *file, struct address_space *mapping,
1970 loff_t pos, unsigned len, unsigned copied,
1971 struct page *page, void *fsdata)
1972{
1973 struct inode *inode = mapping->host;
1974 unsigned start;
1975
1976 start = pos & (PAGE_CACHE_SIZE - 1);
1977
1978 if (unlikely(copied < len)) {
1979 /*
1980 * The buffers that were written will now be uptodate, so we
1981 * don't have to worry about a readpage reading them and
1982 * overwriting a partial write. However if we have encountered
1983 * a short write and only partially written into a buffer, it
1984 * will not be marked uptodate, so a readpage might come in and
1985 * destroy our partial write.
1986 *
1987 * Do the simplest thing, and just treat any short write to a
1988 * non uptodate page as a zero-length write, and force the
1989 * caller to redo the whole thing.
1990 */
1991 if (!PageUptodate(page))
1992 copied = 0;
1993
1994 page_zero_new_buffers(page, start+copied, start+len);
1995 }
1996 flush_dcache_page(page);
1997
1998 /* This could be a short (even 0-length) commit */
1999 __block_commit_write(inode, page, start, start+copied);
2000
2001 return copied;
2002}
2003EXPORT_SYMBOL(block_write_end);
2004
2005int generic_write_end(struct file *file, struct address_space *mapping,
2006 loff_t pos, unsigned len, unsigned copied,
2007 struct page *page, void *fsdata)
2008{
2009 struct inode *inode = mapping->host;
Jan Karac7d206b2008-07-11 19:27:31 -04002010 int i_size_changed = 0;
Nick Pigginafddba42007-10-16 01:25:01 -07002011
2012 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2013
2014 /*
2015 * No need to use i_size_read() here, the i_size
2016 * cannot change under us because we hold i_mutex.
2017 *
2018 * But it's important to update i_size while still holding page lock:
2019 * page writeout could otherwise come in and zero beyond i_size.
2020 */
2021 if (pos+copied > inode->i_size) {
2022 i_size_write(inode, pos+copied);
Jan Karac7d206b2008-07-11 19:27:31 -04002023 i_size_changed = 1;
Nick Pigginafddba42007-10-16 01:25:01 -07002024 }
2025
2026 unlock_page(page);
2027 page_cache_release(page);
2028
Jan Karac7d206b2008-07-11 19:27:31 -04002029 /*
2030 * Don't mark the inode dirty under page lock. First, it unnecessarily
2031 * makes the holding time of page lock longer. Second, it forces lock
2032 * ordering of page lock and transaction start for journaling
2033 * filesystems.
2034 */
2035 if (i_size_changed)
2036 mark_inode_dirty(inode);
2037
Nick Pigginafddba42007-10-16 01:25:01 -07002038 return copied;
2039}
2040EXPORT_SYMBOL(generic_write_end);
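/*
 * Pairing sketch (illustrative; foo_aops and foo_write_begin are
 * hypothetical): filesystems using block_write_begin() above can usually
 * point ->write_end straight at generic_write_end(), which commits the
 * copied data and updates i_size:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.write_begin	= foo_write_begin,
 *		.write_end	= generic_write_end,
 *	};
 */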
2041
2042/*
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002043 * block_is_partially_uptodate checks whether buffers within a page are
2044 * uptodate or not.
2045 *
2046 * Returns true if all buffers which correspond to a file portion
2047 * we want to read are uptodate.
2048 */
2049int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2050 unsigned long from)
2051{
2052 struct inode *inode = page->mapping->host;
2053 unsigned block_start, block_end, blocksize;
2054 unsigned to;
2055 struct buffer_head *bh, *head;
2056 int ret = 1;
2057
2058 if (!page_has_buffers(page))
2059 return 0;
2060
2061 blocksize = 1 << inode->i_blkbits;
2062 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2063 to = from + to;
2064 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2065 return 0;
2066
2067 head = page_buffers(page);
2068 bh = head;
2069 block_start = 0;
2070 do {
2071 block_end = block_start + blocksize;
2072 if (block_end > from && block_start < to) {
2073 if (!buffer_uptodate(bh)) {
2074 ret = 0;
2075 break;
2076 }
2077 if (block_end >= to)
2078 break;
2079 }
2080 block_start = block_end;
2081 bh = bh->b_this_page;
2082 } while (bh != head);
2083
2084 return ret;
2085}
2086EXPORT_SYMBOL(block_is_partially_uptodate);
2087
2088/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 * Generic "read page" function for block devices that have the normal
2090 * get_block functionality. This is most of the block device filesystems.
2091 * Reads the page asynchronously --- the unlock_buffer() and
2092 * set/clear_buffer_uptodate() functions propagate buffer state into the
2093 * page struct once IO has completed.
2094 */
2095int block_read_full_page(struct page *page, get_block_t *get_block)
2096{
2097 struct inode *inode = page->mapping->host;
2098 sector_t iblock, lblock;
2099 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2100 unsigned int blocksize;
2101 int nr, i;
2102 int fully_mapped = 1;
2103
Matt Mackallcd7619d2005-05-01 08:59:01 -07002104 BUG_ON(!PageLocked(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 blocksize = 1 << inode->i_blkbits;
2106 if (!page_has_buffers(page))
2107 create_empty_buffers(page, blocksize, 0);
2108 head = page_buffers(page);
2109
2110 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2111 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2112 bh = head;
2113 nr = 0;
2114 i = 0;
2115
2116 do {
2117 if (buffer_uptodate(bh))
2118 continue;
2119
2120 if (!buffer_mapped(bh)) {
Andrew Mortonc64610b2005-05-16 21:53:49 -07002121 int err = 0;
2122
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 fully_mapped = 0;
2124 if (iblock < lblock) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002125 WARN_ON(bh->b_size != blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002126 err = get_block(inode, iblock, bh, 0);
2127 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 SetPageError(page);
2129 }
2130 if (!buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002131 zero_user(page, i * blocksize, blocksize);
Andrew Mortonc64610b2005-05-16 21:53:49 -07002132 if (!err)
2133 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 continue;
2135 }
2136 /*
2137 * get_block() might have updated the buffer
2138 * synchronously
2139 */
2140 if (buffer_uptodate(bh))
2141 continue;
2142 }
2143 arr[nr++] = bh;
2144 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2145
2146 if (fully_mapped)
2147 SetPageMappedToDisk(page);
2148
2149 if (!nr) {
2150 /*
2151 * All buffers are uptodate - we can set the page uptodate
2152 * as well. But not if get_block() returned an error.
2153 */
2154 if (!PageError(page))
2155 SetPageUptodate(page);
2156 unlock_page(page);
2157 return 0;
2158 }
2159
2160 /* Stage two: lock the buffers */
2161 for (i = 0; i < nr; i++) {
2162 bh = arr[i];
2163 lock_buffer(bh);
2164 mark_buffer_async_read(bh);
2165 }
2166
2167 /*
2168 * Stage 3: start the IO. Check for uptodateness
2169 * inside the buffer lock in case another process reading
2170 * the underlying blockdev brought it uptodate (the sct fix).
2171 */
2172 for (i = 0; i < nr; i++) {
2173 bh = arr[i];
2174 if (buffer_uptodate(bh))
2175 end_buffer_async_read(bh, 1);
2176 else
2177 submit_bh(READ, bh);
2178 }
2179 return 0;
2180}
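/*
 * Usage sketch (foo_fs is hypothetical): the typical ->readpage method of
 * a block-backed filesystem is a one-line wrapper:
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, foo_get_block);
 *	}
 */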
2181
2182/* utility function for filesystems that need to do work on expanding
Nick Piggin89e10782007-10-16 01:25:07 -07002183 * truncates. Uses filesystem pagecache writes to allow the filesystem to
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 * deal with the hole.
2185 */
Nick Piggin89e10782007-10-16 01:25:07 -07002186int generic_cont_expand_simple(struct inode *inode, loff_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187{
2188 struct address_space *mapping = inode->i_mapping;
2189 struct page *page;
Nick Piggin89e10782007-10-16 01:25:07 -07002190 void *fsdata;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002191 unsigned long limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 int err;
2193
2194 err = -EFBIG;
2195 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2196 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2197 send_sig(SIGXFSZ, current, 0);
2198 goto out;
2199 }
2200 if (size > inode->i_sb->s_maxbytes)
2201 goto out;
2202
Nick Piggin89e10782007-10-16 01:25:07 -07002203 err = pagecache_write_begin(NULL, mapping, size, 0,
2204 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2205 &page, &fsdata);
2206 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 goto out;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002208
Nick Piggin89e10782007-10-16 01:25:07 -07002209 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2210 BUG_ON(err > 0);
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002211
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212out:
2213 return err;
2214}
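/*
 * Illustrative call site (sketch only): a filesystem extending a file
 * from its ->setattr method might do:
 *
 *	if (attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */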
2215
Adrian Bunkf1e3af72008-04-29 00:59:01 -07002216static int cont_expand_zero(struct file *file, struct address_space *mapping,
2217 loff_t pos, loff_t *bytes)
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002218{
Nick Piggin89e10782007-10-16 01:25:07 -07002219 struct inode *inode = mapping->host;
2220 unsigned blocksize = 1 << inode->i_blkbits;
2221 struct page *page;
2222 void *fsdata;
2223 pgoff_t index, curidx;
2224 loff_t curpos;
2225 unsigned zerofrom, offset, len;
2226 int err = 0;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002227
Nick Piggin89e10782007-10-16 01:25:07 -07002228 index = pos >> PAGE_CACHE_SHIFT;
2229 offset = pos & ~PAGE_CACHE_MASK;
2230
2231 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2232 zerofrom = curpos & ~PAGE_CACHE_MASK;
2233 if (zerofrom & (blocksize-1)) {
2234 *bytes |= (blocksize-1);
2235 (*bytes)++;
2236 }
2237 len = PAGE_CACHE_SIZE - zerofrom;
2238
2239 err = pagecache_write_begin(file, mapping, curpos, len,
2240 AOP_FLAG_UNINTERRUPTIBLE,
2241 &page, &fsdata);
2242 if (err)
2243 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002244 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002245 err = pagecache_write_end(file, mapping, curpos, len, len,
2246 page, fsdata);
2247 if (err < 0)
2248 goto out;
2249 BUG_ON(err != len);
2250 err = 0;
OGAWA Hirofumi061e9742008-04-28 02:16:28 -07002251
2252 balance_dirty_pages_ratelimited(mapping);
Nick Piggin89e10782007-10-16 01:25:07 -07002253 }
2254
2255 /* page covers the boundary, find the boundary offset */
2256 if (index == curidx) {
2257 zerofrom = curpos & ~PAGE_CACHE_MASK;
2258 /* if we will expand the thing last block will be filled */
2259 if (offset <= zerofrom) {
2260 goto out;
2261 }
2262 if (zerofrom & (blocksize-1)) {
2263 *bytes |= (blocksize-1);
2264 (*bytes)++;
2265 }
2266 len = offset - zerofrom;
2267
2268 err = pagecache_write_begin(file, mapping, curpos, len,
2269 AOP_FLAG_UNINTERRUPTIBLE,
2270 &page, &fsdata);
2271 if (err)
2272 goto out;
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002273 zero_user(page, zerofrom, len);
Nick Piggin89e10782007-10-16 01:25:07 -07002274 err = pagecache_write_end(file, mapping, curpos, len, len,
2275 page, fsdata);
2276 if (err < 0)
2277 goto out;
2278 BUG_ON(err != len);
2279 err = 0;
2280 }
2281out:
2282 return err;
OGAWA Hirofumi05eb0b52006-01-08 01:02:13 -08002283}
2284
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285/*
2286 * For moronic filesystems that do not allow holes in files.
2287 * We may have to extend the file.
2288 */
Nick Piggin89e10782007-10-16 01:25:07 -07002289int cont_write_begin(struct file *file, struct address_space *mapping,
2290 loff_t pos, unsigned len, unsigned flags,
2291 struct page **pagep, void **fsdata,
2292 get_block_t *get_block, loff_t *bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 unsigned blocksize = 1 << inode->i_blkbits;
Nick Piggin89e10782007-10-16 01:25:07 -07002296 unsigned zerofrom;
2297 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298
Nick Piggin89e10782007-10-16 01:25:07 -07002299 err = cont_expand_zero(file, mapping, pos, bytes);
2300 if (err)
2301 goto out;
2302
2303 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2304 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2305 *bytes |= (blocksize-1);
2306 (*bytes)++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 }
2308
Nick Piggin89e10782007-10-16 01:25:07 -07002309 *pagep = NULL;
2310 err = block_write_begin(file, mapping, pos, len,
2311 flags, pagep, fsdata, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312out:
Nick Piggin89e10782007-10-16 01:25:07 -07002313 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314}
2315
2316int block_prepare_write(struct page *page, unsigned from, unsigned to,
2317 get_block_t *get_block)
2318{
2319 struct inode *inode = page->mapping->host;
2320 int err = __block_prepare_write(inode, page, from, to, get_block);
2321 if (err)
2322 ClearPageUptodate(page);
2323 return err;
2324}
2325
2326int block_commit_write(struct page *page, unsigned from, unsigned to)
2327{
2328 struct inode *inode = page->mapping->host;
2329 __block_commit_write(inode,page,from,to);
2330 return 0;
2331}
2332
David Chinner54171692007-07-19 17:39:55 +10002333/*
2334 * block_page_mkwrite() is not allowed to change the file size as it gets
2335 * called from a page fault handler when a page is first dirtied. Hence we must
2336 * be careful to check for EOF conditions here. We set the page up correctly
2337 * for a written page which means we get ENOSPC checking when writing into
2338 * holes and correct delalloc and unwritten extent mapping on filesystems that
2339 * support these features.
2340 *
2341 * We are not allowed to take the i_mutex here so we have to play games to
2342 * protect against truncate races as the page could now be beyond EOF. Because
2343 * vmtruncate() writes the inode size before removing pages, once we have the
2344 * page lock we can determine safely if the page is beyond EOF. If it is not
2345 * beyond EOF, then the page is guaranteed safe against truncation until we
2346 * unlock the page.
2347 */
2348int
Nick Pigginc2ec1752009-03-31 15:23:21 -07002349block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
David Chinner54171692007-07-19 17:39:55 +10002350 get_block_t get_block)
2351{
Nick Pigginc2ec1752009-03-31 15:23:21 -07002352 struct page *page = vmf->page;
David Chinner54171692007-07-19 17:39:55 +10002353 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2354 unsigned long end;
2355 loff_t size;
Nick Piggin56a76f82009-03-31 15:23:23 -07002356 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
David Chinner54171692007-07-19 17:39:55 +10002357
2358 lock_page(page);
2359 size = i_size_read(inode);
2360 if ((page->mapping != inode->i_mapping) ||
Nick Piggin18336332007-07-20 00:31:45 -07002361 (page_offset(page) > size)) {
David Chinner54171692007-07-19 17:39:55 +10002362 /* page got truncated out from underneath us */
2363 goto out_unlock;
2364 }
2365
2366 /* page is wholly or partially inside EOF */
2367 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2368 end = size & ~PAGE_CACHE_MASK;
2369 else
2370 end = PAGE_CACHE_SIZE;
2371
2372 ret = block_prepare_write(page, 0, end, get_block);
2373 if (!ret)
2374 ret = block_commit_write(page, 0, end);
2375
Nick Piggin56a76f82009-03-31 15:23:23 -07002376 if (unlikely(ret)) {
2377 if (ret == -ENOMEM)
2378 ret = VM_FAULT_OOM;
2379 else /* -ENOSPC, -EIO, etc */
2380 ret = VM_FAULT_SIGBUS;
2381 }
Nick Pigginc2ec1752009-03-31 15:23:21 -07002382
Nick Piggin56a76f82009-03-31 15:23:23 -07002383out_unlock:
David Chinner54171692007-07-19 17:39:55 +10002384 unlock_page(page);
2385 return ret;
2386}
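/*
 * Usage sketch (foo_fs is hypothetical): wire this up as the
 * ->page_mkwrite handler in the file's vm_operations_struct:
 *
 *	static int foo_page_mkwrite(struct vm_area_struct *vma,
 *				    struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite(vma, vmf, foo_get_block);
 *	}
 */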
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
2388/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002389 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 * immediately, while under the page lock. So it needs a special end_io
2391 * handler which does not touch the bh after unlocking it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 */
2393static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2394{
Dmitry Monakhov68671f32007-10-16 01:24:47 -07002395 __end_buffer_read_notouch(bh, uptodate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396}
2397
2398/*
Nick Piggin03158cd2007-10-16 01:25:25 -07002399 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2400 * the page (converting it to circular linked list and taking care of page
2401 * dirty races).
2402 */
2403static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2404{
2405 struct buffer_head *bh;
2406
2407 BUG_ON(!PageLocked(page));
2408
2409 spin_lock(&page->mapping->private_lock);
2410 bh = head;
2411 do {
2412 if (PageDirty(page))
2413 set_buffer_dirty(bh);
2414 if (!bh->b_this_page)
2415 bh->b_this_page = head;
2416 bh = bh->b_this_page;
2417 } while (bh != head);
2418 attach_page_buffers(page, head);
2419 spin_unlock(&page->mapping->private_lock);
2420}
2421
2422/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423 * On entry, the page is fully not uptodate.
2424 * On exit the page is fully uptodate in the areas outside (from,to)
2425 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002426int nobh_write_begin(struct file *file, struct address_space *mapping,
2427 loff_t pos, unsigned len, unsigned flags,
2428 struct page **pagep, void **fsdata,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 get_block_t *get_block)
2430{
Nick Piggin03158cd2007-10-16 01:25:25 -07002431 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 const unsigned blkbits = inode->i_blkbits;
2433 const unsigned blocksize = 1 << blkbits;
Nick Piggina4b06722007-10-16 01:24:48 -07002434 struct buffer_head *head, *bh;
Nick Piggin03158cd2007-10-16 01:25:25 -07002435 struct page *page;
2436 pgoff_t index;
2437 unsigned from, to;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 unsigned block_in_page;
Nick Piggina4b06722007-10-16 01:24:48 -07002439 unsigned block_start, block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 sector_t block_in_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 int nr_reads = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 int ret = 0;
2443 int is_mapped_to_disk = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444
Nick Piggin03158cd2007-10-16 01:25:25 -07002445 index = pos >> PAGE_CACHE_SHIFT;
2446 from = pos & (PAGE_CACHE_SIZE - 1);
2447 to = from + len;
2448
Nick Piggin54566b22009-01-04 12:00:53 -08002449 page = grab_cache_page_write_begin(mapping, index, flags);
Nick Piggin03158cd2007-10-16 01:25:25 -07002450 if (!page)
2451 return -ENOMEM;
2452 *pagep = page;
2453 *fsdata = NULL;
2454
2455 if (page_has_buffers(page)) {
2456 unlock_page(page);
2457 page_cache_release(page);
2458 *pagep = NULL;
2459 return block_write_begin(file, mapping, pos, len, flags, pagep,
2460 fsdata, get_block);
2461 }
Nick Piggina4b06722007-10-16 01:24:48 -07002462
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 if (PageMappedToDisk(page))
2464 return 0;
2465
Nick Piggina4b06722007-10-16 01:24:48 -07002466 /*
2467 * Allocate buffers so that we can keep track of state, and potentially
2468 * attach them to the page if an error occurs. In the common case of
2469 * no error, they will just be freed again without ever being attached
2470 * to the page (which is all OK, because we're under the page lock).
2471 *
2472 * Be careful: the buffer linked list is a NULL terminated one, rather
2473 * than the circular one we're used to.
2474 */
2475 head = alloc_page_buffers(page, blocksize, 0);
Nick Piggin03158cd2007-10-16 01:25:25 -07002476 if (!head) {
2477 ret = -ENOMEM;
2478 goto out_release;
2479 }
Nick Piggina4b06722007-10-16 01:24:48 -07002480
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482
2483 /*
2484 * We loop across all blocks in the page, whether or not they are
2485 * part of the affected region. This is so we can discover if the
2486 * page is fully mapped-to-disk.
2487 */
Nick Piggina4b06722007-10-16 01:24:48 -07002488 for (block_start = 0, block_in_page = 0, bh = head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 block_start < PAGE_CACHE_SIZE;
Nick Piggina4b06722007-10-16 01:24:48 -07002490 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 int create;
2492
Nick Piggina4b06722007-10-16 01:24:48 -07002493 block_end = block_start + blocksize;
2494 bh->b_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 create = 1;
2496 if (block_start >= to)
2497 create = 0;
2498 ret = get_block(inode, block_in_file + block_in_page,
Nick Piggina4b06722007-10-16 01:24:48 -07002499 bh, create);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 if (ret)
2501 goto failed;
Nick Piggina4b06722007-10-16 01:24:48 -07002502 if (!buffer_mapped(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 is_mapped_to_disk = 0;
Nick Piggina4b06722007-10-16 01:24:48 -07002504 if (buffer_new(bh))
2505 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2506 if (PageUptodate(page)) {
2507 set_buffer_uptodate(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 continue;
Nick Piggina4b06722007-10-16 01:24:48 -07002509 }
2510 if (buffer_new(bh) || !buffer_mapped(bh)) {
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002511 zero_user_segments(page, block_start, from,
2512 to, block_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 continue;
2514 }
Nick Piggina4b06722007-10-16 01:24:48 -07002515 if (buffer_uptodate(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 continue; /* reiserfs does this */
2517 if (block_start < from || block_end > to) {
Nick Piggina4b06722007-10-16 01:24:48 -07002518 lock_buffer(bh);
2519 bh->b_end_io = end_buffer_read_nobh;
2520 submit_bh(READ, bh);
2521 nr_reads++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 }
2523 }
2524
2525 if (nr_reads) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526 /*
2527 * The page is locked, so these buffers are protected from
2528 * any VM or truncate activity. Hence we don't need to care
2529 * for the buffer_head refcounts.
2530 */
Nick Piggina4b06722007-10-16 01:24:48 -07002531 for (bh = head; bh; bh = bh->b_this_page) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 wait_on_buffer(bh);
2533 if (!buffer_uptodate(bh))
2534 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 }
2536 if (ret)
2537 goto failed;
2538 }
2539
2540 if (is_mapped_to_disk)
2541 SetPageMappedToDisk(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542
Nick Piggin03158cd2007-10-16 01:25:25 -07002543 *fsdata = head; /* to be released by nobh_write_end */
Nick Piggina4b06722007-10-16 01:24:48 -07002544
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 return 0;
2546
2547failed:
Nick Piggin03158cd2007-10-16 01:25:25 -07002548 BUG_ON(!ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 /*
Nick Piggina4b06722007-10-16 01:24:48 -07002550 * Error recovery is a bit difficult. We need to zero out blocks that
2551 * were newly allocated, and dirty them to ensure they get written out.
2552 * Buffers need to be attached to the page at this point, otherwise
2553 * the handling of potential IO errors during writeout would be hard
2554 * (could try doing synchronous writeout, but what if that fails too?)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 */
Nick Piggin03158cd2007-10-16 01:25:25 -07002556 attach_nobh_buffers(page, head);
2557 page_zero_new_buffers(page, from, to);
Nick Piggina4b06722007-10-16 01:24:48 -07002558
Nick Piggin03158cd2007-10-16 01:25:25 -07002559out_release:
2560 unlock_page(page);
2561 page_cache_release(page);
2562 *pagep = NULL;
Nick Piggina4b06722007-10-16 01:24:48 -07002563
Nick Piggin03158cd2007-10-16 01:25:25 -07002564 if (pos + len > inode->i_size)
2565 vmtruncate(inode, inode->i_size);
Nick Piggina4b06722007-10-16 01:24:48 -07002566
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567 return ret;
2568}
Nick Piggin03158cd2007-10-16 01:25:25 -07002569EXPORT_SYMBOL(nobh_write_begin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570
Nick Piggin03158cd2007-10-16 01:25:25 -07002571int nobh_write_end(struct file *file, struct address_space *mapping,
2572 loff_t pos, unsigned len, unsigned copied,
2573 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574{
2575 struct inode *inode = page->mapping->host;
Nick Pigginefdc3132007-10-21 06:57:41 +02002576 struct buffer_head *head = fsdata;
Nick Piggin03158cd2007-10-16 01:25:25 -07002577 struct buffer_head *bh;
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002578 BUG_ON(fsdata != NULL && page_has_buffers(page));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579
Dave Kleikampd4cf1092009-02-06 14:59:26 -06002580 if (unlikely(copied < len) && head)
Dmitri Monakhov5b41e742008-03-28 14:15:52 -07002581 attach_nobh_buffers(page, head);
2582 if (page_has_buffers(page))
2583 return generic_write_end(file, mapping, pos, len,
2584 copied, page, fsdata);
Nick Piggina4b06722007-10-16 01:24:48 -07002585
Nick Piggin22c8ca72007-02-20 13:58:09 -08002586 SetPageUptodate(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 set_page_dirty(page);
Nick Piggin03158cd2007-10-16 01:25:25 -07002588 if (pos+copied > inode->i_size) {
2589 i_size_write(inode, pos+copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 mark_inode_dirty(inode);
2591 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002592
2593 unlock_page(page);
2594 page_cache_release(page);
2595
Nick Piggin03158cd2007-10-16 01:25:25 -07002596 while (head) {
2597 bh = head;
2598 head = head->b_this_page;
2599 free_buffer_head(bh);
2600 }
2601
2602 return copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603}
Nick Piggin03158cd2007-10-16 01:25:25 -07002604EXPORT_SYMBOL(nobh_write_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605
2606/*
2607 * nobh_writepage() - based on block_write_full_page() except
2608 * that it tries to operate without attaching bufferheads to
2609 * the page.
2610 */
2611int nobh_writepage(struct page *page, get_block_t *get_block,
2612 struct writeback_control *wbc)
2613{
2614 struct inode * const inode = page->mapping->host;
2615 loff_t i_size = i_size_read(inode);
2616 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2617 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 int ret;
2619
2620 /* Is the page fully inside i_size? */
2621 if (page->index < end_index)
2622 goto out;
2623
2624 /* Is the page fully outside i_size? (truncate in progress) */
2625 offset = i_size & (PAGE_CACHE_SIZE-1);
2626 if (page->index >= end_index+1 || !offset) {
2627 /*
2628 * The page may have dirty, unmapped buffers. For example,
2629 * they may have been added in ext3_writepage(). Make them
2630 * freeable here, so the page does not leak.
2631 */
2632#if 0
2633 /* Not really sure about this - do we need this? */
2634 if (page->mapping->a_ops->invalidatepage)
2635 page->mapping->a_ops->invalidatepage(page, offset);
2636#endif
2637 unlock_page(page);
2638 return 0; /* don't care */
2639 }
2640
2641 /*
2642 * The page straddles i_size. It must be zeroed out on each and every
2643 * writepage invocation because it may be mmapped. "A file is mapped
2644 * in multiples of the page size. For a file that is not a multiple of
2645 * the page size, the remaining memory is zeroed when mapped, and
2646 * writes to that region are not written out to the file."
2647 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002648 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649out:
2650 ret = mpage_writepage(page, get_block, wbc);
2651 if (ret == -EAGAIN)
2652 ret = __block_write_full_page(inode, page, get_block, wbc);
2653 return ret;
2654}
2655EXPORT_SYMBOL(nobh_writepage);
2656
Nick Piggin03158cd2007-10-16 01:25:25 -07002657int nobh_truncate_page(struct address_space *mapping,
2658 loff_t from, get_block_t *get_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2661 unsigned offset = from & (PAGE_CACHE_SIZE-1);
Nick Piggin03158cd2007-10-16 01:25:25 -07002662 unsigned blocksize;
2663 sector_t iblock;
2664 unsigned length, pos;
2665 struct inode *inode = mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 struct page *page;
Nick Piggin03158cd2007-10-16 01:25:25 -07002667 struct buffer_head map_bh;
2668 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669
Nick Piggin03158cd2007-10-16 01:25:25 -07002670 blocksize = 1 << inode->i_blkbits;
2671 length = offset & (blocksize - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672
Nick Piggin03158cd2007-10-16 01:25:25 -07002673 /* Block boundary? Nothing to do */
2674 if (!length)
2675 return 0;
2676
2677 length = blocksize - length;
2678 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2679
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 page = grab_cache_page(mapping, index);
Nick Piggin03158cd2007-10-16 01:25:25 -07002681 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 if (!page)
2683 goto out;
2684
Nick Piggin03158cd2007-10-16 01:25:25 -07002685 if (page_has_buffers(page)) {
2686has_buffers:
2687 unlock_page(page);
2688 page_cache_release(page);
2689 return block_truncate_page(mapping, from, get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 }
Nick Piggin03158cd2007-10-16 01:25:25 -07002691
2692 /* Find the buffer that contains "offset" */
2693 pos = blocksize;
2694 while (offset >= pos) {
2695 iblock++;
2696 pos += blocksize;
2697 }
2698
2699 err = get_block(inode, iblock, &map_bh, 0);
2700 if (err)
2701 goto unlock;
2702 /* unmapped? It's a hole - nothing to do */
2703 if (!buffer_mapped(&map_bh))
2704 goto unlock;
2705
2706 /* Ok, it's mapped. Make sure it's up-to-date */
2707 if (!PageUptodate(page)) {
2708 err = mapping->a_ops->readpage(NULL, page);
2709 if (err) {
2710 page_cache_release(page);
2711 goto out;
2712 }
2713 lock_page(page);
2714 if (!PageUptodate(page)) {
2715 err = -EIO;
2716 goto unlock;
2717 }
2718 if (page_has_buffers(page))
2719 goto has_buffers;
2720 }
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002721 zero_user(page, offset, length);
Nick Piggin03158cd2007-10-16 01:25:25 -07002722 set_page_dirty(page);
2723 err = 0;
2724
2725unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 unlock_page(page);
2727 page_cache_release(page);
2728out:
Nick Piggin03158cd2007-10-16 01:25:25 -07002729 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730}
2731EXPORT_SYMBOL(nobh_truncate_page);
2732
2733int block_truncate_page(struct address_space *mapping,
2734 loff_t from, get_block_t *get_block)
2735{
2736 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2737 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2738 unsigned blocksize;
Andrew Morton54b21a72006-01-08 01:03:05 -08002739 sector_t iblock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 unsigned length, pos;
2741 struct inode *inode = mapping->host;
2742 struct page *page;
2743 struct buffer_head *bh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 int err;
2745
2746 blocksize = 1 << inode->i_blkbits;
2747 length = offset & (blocksize - 1);
2748
2749 /* Block boundary? Nothing to do */
2750 if (!length)
2751 return 0;
2752
2753 length = blocksize - length;
Andrew Morton54b21a72006-01-08 01:03:05 -08002754 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755
2756 page = grab_cache_page(mapping, index);
2757 err = -ENOMEM;
2758 if (!page)
2759 goto out;
2760
2761 if (!page_has_buffers(page))
2762 create_empty_buffers(page, blocksize, 0);
2763
2764 /* Find the buffer that contains "offset" */
2765 bh = page_buffers(page);
2766 pos = blocksize;
2767 while (offset >= pos) {
2768 bh = bh->b_this_page;
2769 iblock++;
2770 pos += blocksize;
2771 }
2772
2773 err = 0;
2774 if (!buffer_mapped(bh)) {
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002775 WARN_ON(bh->b_size != blocksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 err = get_block(inode, iblock, bh, 0);
2777 if (err)
2778 goto unlock;
2779 /* unmapped? It's a hole - nothing to do */
2780 if (!buffer_mapped(bh))
2781 goto unlock;
2782 }
2783
2784 /* Ok, it's mapped. Make sure it's up-to-date */
2785 if (PageUptodate(page))
2786 set_buffer_uptodate(bh);
2787
David Chinner33a266d2007-02-12 00:51:41 -08002788 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 err = -EIO;
2790 ll_rw_block(READ, 1, &bh);
2791 wait_on_buffer(bh);
2792 /* Uhhuh. Read error. Complain and punt. */
2793 if (!buffer_uptodate(bh))
2794 goto unlock;
2795 }
2796
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002797 zero_user(page, offset, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 mark_buffer_dirty(bh);
2799 err = 0;
2800
2801unlock:
2802 unlock_page(page);
2803 page_cache_release(page);
2804out:
2805 return err;
2806}
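/*
 * Illustrative call site (sketch only; foo_get_block is hypothetical): a
 * truncate path zeroing the partial block beyond the new EOF might do:
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  foo_get_block);
 */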
2807
2808/*
2809 * The generic ->writepage function for buffer-backed address_spaces
2810 */
2811int block_write_full_page(struct page *page, get_block_t *get_block,
2812 struct writeback_control *wbc)
2813{
2814 struct inode * const inode = page->mapping->host;
2815 loff_t i_size = i_size_read(inode);
2816 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2817 unsigned offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818
2819 /* Is the page fully inside i_size? */
2820 if (page->index < end_index)
2821 return __block_write_full_page(inode, page, get_block, wbc);
2822
2823 /* Is the page fully outside i_size? (truncate in progress) */
2824 offset = i_size & (PAGE_CACHE_SIZE-1);
2825 if (page->index >= end_index+1 || !offset) {
2826 /*
2827 * The page may have dirty, unmapped buffers. For example,
2828 * they may have been added in ext3_writepage(). Make them
2829 * freeable here, so the page does not leak.
2830 */
Jan Karaaaa40592005-10-30 15:00:16 -08002831 do_invalidatepage(page, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 unlock_page(page);
2833 return 0; /* don't care */
2834 }
2835
2836 /*
2837 * The page straddles i_size. It must be zeroed out on each and every
2838 * writepage invocation because it may be mmapped. "A file is mapped
2839 * in multiples of the page size. For a file that is not a multiple of
2840 * the page size, the remaining memory is zeroed when mapped, and
2841 * writes to that region are not written out to the file."
2842 */
Christoph Lametereebd2aa2008-02-04 22:28:29 -08002843 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 return __block_write_full_page(inode, page, get_block, wbc);
2845}
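/*
 * Usage sketch (foo_fs is hypothetical): the matching ->writepage method
 * is again a thin wrapper:
 *
 *	static int foo_writepage(struct page *page,
 *				 struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, foo_get_block, wbc);
 *	}
 */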
2846
2847sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2848 get_block_t *get_block)
2849{
2850 struct buffer_head tmp;
2851 struct inode *inode = mapping->host;
2852 tmp.b_state = 0;
2853 tmp.b_blocknr = 0;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -08002854 tmp.b_size = 1 << inode->i_blkbits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 get_block(inode, block, &tmp, 0);
2856 return tmp.b_blocknr;
2857}
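/*
 * Usage sketch (foo_fs is hypothetical): a filesystem's ->bmap method is
 * typically:
 *
 *	static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, foo_get_block);
 *	}
 */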
2858
NeilBrown6712ecf2007-09-27 12:47:43 +02002859static void end_bio_bh_io_sync(struct bio *bio, int err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860{
2861 struct buffer_head *bh = bio->bi_private;
2862
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 if (err == -EOPNOTSUPP) {
2864 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2865 set_bit(BH_Eopnotsupp, &bh->b_state);
2866 }
2867
Keith Mannthey08bafc02008-11-25 10:24:35 +01002868 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2869 set_bit(BH_Quiet, &bh->b_state);
2870
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2872 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873}
2874
2875int submit_bh(int rw, struct buffer_head * bh)
2876{
2877 struct bio *bio;
2878 int ret = 0;
2879
2880 BUG_ON(!buffer_locked(bh));
2881 BUG_ON(!buffer_mapped(bh));
2882 BUG_ON(!bh->b_end_io);
2883
Jens Axboe48fd4f92008-08-22 10:00:36 +02002884 /*
2885 * Mask in barrier bit for a write (could be either a WRITE or a
2886 * WRITE_SYNC).
2887 */
2888 if (buffer_ordered(bh) && (rw & WRITE))
2889 rw |= WRITE_BARRIER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890
2891 /*
Jens Axboe48fd4f92008-08-22 10:00:36 +02002892 * Only clear out a write error when rewriting
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 */
Jens Axboe48fd4f92008-08-22 10:00:36 +02002894 if (test_set_buffer_req(bh) && (rw & WRITE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 clear_buffer_write_io_error(bh);
2896
2897 /*
2898 * from here on down, it's all bio -- do the initial mapping,
2899 * submit_bio -> generic_make_request may further map this bio around
2900 */
2901 bio = bio_alloc(GFP_NOIO, 1);
2902
2903 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2904 bio->bi_bdev = bh->b_bdev;
2905 bio->bi_io_vec[0].bv_page = bh->b_page;
2906 bio->bi_io_vec[0].bv_len = bh->b_size;
2907 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2908
2909 bio->bi_vcnt = 1;
2910 bio->bi_idx = 0;
2911 bio->bi_size = bh->b_size;
2912
2913 bio->bi_end_io = end_bio_bh_io_sync;
2914 bio->bi_private = bh;
2915
2916 bio_get(bio);
2917 submit_bio(rw, bio);
2918
2919 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2920 ret = -EOPNOTSUPP;
2921
2922 bio_put(bio);
2923 return ret;
2924}
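/*
 * Illustrative synchronous read through submit_bh (sketch only; this is
 * essentially what ll_rw_block() below does for a single buffer):
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */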
2925
2926/**
2927 * ll_rw_block: low-level access to block devices (DEPRECATED)
Jan Karaa7662232005-09-06 15:19:10 -07002928 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 * @nr: number of &struct buffer_heads in the array
2930 * @bhs: array of pointers to &struct buffer_head
2931 *
Jan Karaa7662232005-09-06 15:19:10 -07002932 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2933 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2934 * %SWRITE is like %WRITE except that we make sure the *current* data in the
2935 * buffers is sent to disk. The fourth %READA option is described in the
2936 * documentation
2936 * for generic_make_request() which ll_rw_block() calls.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937 *
2938 * This function drops any buffer that it cannot get a lock on (with the
Jan Karaa7662232005-09-06 15:19:10 -07002939 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2940 * clean when doing a write request, and any buffer that appears to be
2941 * up-to-date when doing a read request. Further it marks as clean buffers that
2942 * are processed for writing (the buffer cache won't assume that they are
2943 * actually clean until the buffer gets unlocked).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 *
2945 * ll_rw_block sets b_end_io to a simple completion handler that marks
2946 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2947 * any waiters.
2948 *
2949 * All of the buffers must be for the same device, and must also be a
2950 * multiple of the current approved size for the device.
2951 */
2952void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2953{
2954 int i;
2955
2956 for (i = 0; i < nr; i++) {
2957 struct buffer_head *bh = bhs[i];
2958
Jens Axboe18ce3752008-07-01 09:07:34 +02002959 if (rw == SWRITE || rw == SWRITE_SYNC)
Jan Karaa7662232005-09-06 15:19:10 -07002960 lock_buffer(bh);
Nick Pigginca5de402008-08-02 12:02:13 +02002961 else if (!trylock_buffer(bh))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962 continue;
2963
Jens Axboe18ce3752008-07-01 09:07:34 +02002964 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 if (test_clear_buffer_dirty(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07002966 bh->b_end_io = end_buffer_write_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08002967 get_bh(bh);
Jens Axboe18ce3752008-07-01 09:07:34 +02002968 if (rw == SWRITE_SYNC)
2969 submit_bh(WRITE_SYNC, bh);
2970 else
2971 submit_bh(WRITE, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 continue;
2973 }
2974 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 if (!buffer_uptodate(bh)) {
akpm@osdl.org76c30732005-04-16 15:24:07 -07002976 bh->b_end_io = end_buffer_read_sync;
OGAWA Hirofumie60e5c52006-02-03 03:04:43 -08002977 get_bh(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 submit_bh(rw, bh);
2979 continue;
2980 }
2981 }
2982 unlock_buffer(bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983 }
2984}
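/*
 * Usage sketch (illustrative only; bh1 and bh2 are hypothetical): kick off
 * reads for a batch of buffers and wait only for the ones actually needed:
 *
 *	struct buffer_head *bhs[2] = { bh1, bh2 };
 *
 *	ll_rw_block(READ, 2, bhs);
 *	wait_on_buffer(bhs[0]);
 *	if (!buffer_uptodate(bhs[0]))
 *		return -EIO;
 */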
2985
2986/*
2987 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2988 * and then start new I/O and then wait upon it. The caller must have a ref on
2989 * the buffer_head.
2990 */
2991int sync_dirty_buffer(struct buffer_head *bh)
2992{
2993 int ret = 0;
2994
2995 WARN_ON(atomic_read(&bh->b_count) < 1);
2996 lock_buffer(bh);
2997 if (test_clear_buffer_dirty(bh)) {
2998 get_bh(bh);
2999 bh->b_end_io = end_buffer_write_sync;
Jens Axboe78f707b2009-02-17 13:59:08 +01003000 ret = submit_bh(WRITE, bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001 wait_on_buffer(bh);
3002 if (buffer_eopnotsupp(bh)) {
3003 clear_buffer_eopnotsupp(bh);
3004 ret = -EOPNOTSUPP;
3005 }
3006 if (!ret && !buffer_uptodate(bh))
3007 ret = -EIO;
3008 } else {
3009 unlock_buffer(bh);
3010 }
3011 return ret;
3012}
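/*
 * Illustrative use (sketch only): force one piece of dirty metadata to
 * disk and propagate any I/O error:
 *
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		return err;
 */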

/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (e.g. ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
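
/*
 * Illustrative sketch: a filesystem with no per-page private state can
 * route its ->releasepage operation straight here (the foo_* name is
 * made up):
 *
 *	static int foo_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 *
 * The VM calls ->releasepage with the page locked, which satisfies one
 * of the two exclusion rules described above.
 */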

void block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
}
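
/*
 * Block-backed filesystems typically wire this up as the ->sync_page
 * address_space operation so that a waiter can unplug the underlying
 * queue.  A sketch with made-up foo_* names:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.readpage	= block_read_full_page,
 *		.writepage	= foo_writepage,
 *		.sync_page	= block_sync_page,
 *	};
 */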

/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}

/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);

static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
	per_cpu(bh_accounting, cpu).nr = 0;
	put_cpu_var(bh_accounting);
}

static int buffer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}

/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date and false,
 * with the buffer locked, if not.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);

/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);
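
/*
 * Together, bh_uptodate_or_lock() and bh_submit_read() replace the
 * open-coded "read this block unless it is already cached" sequence.
 * An illustrative sketch (hypothetical caller; "sb" and "block" are
 * made-up names):
 *
 *	bh = sb_getblk(sb, block);
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh)) {
 *			brelse(bh);
 *			return -EIO;
 *		}
 *	}
 *
 * On the fall-through path bh is up-to-date and unlocked, ready for
 * use.
 */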

static void
init_buffer_head(void *data)
{
	struct buffer_head *bh = data;

	memset(bh, 0, sizeof(*bh));
	INIT_LIST_HEAD(&bh->b_assoc_buffers);
}

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				init_buffer_head);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
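
/*
 * Worked example of the sizing above, with illustrative round numbers
 * only: assuming 4KB pages and a 64-byte buffer_head, each page holds
 * 4096 / 64 = 64 buffer heads.  A machine with 1,000,000 free buffer
 * pages then gets nrpages = 100,000 and max_buffer_heads = 6,400,000;
 * once the live count passes that, writeback starts stripping buffer
 * heads.
 */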

EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_page_mkwrite);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_write_begin);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(mark_buffer_dirty);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(unlock_buffer);