/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>

STATIC kmem_zone_t *xfs_buf_zone;
STATIC kmem_shaker_t xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

STATIC struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;

#ifdef XFS_BUF_TRACE
void
xfs_buf_trace(
	xfs_buf_t	*bp,
	char		*id,
	void		*data,
	void		*ra)
{
	ktrace_enter(xfs_buf_trace_buf,
		bp, id,
		(void *)(unsigned long)bp->b_flags,
		(void *)(unsigned long)bp->b_hold.counter,
		(void *)(unsigned long)bp->b_sema.count.counter,
		(void *)current,
		data, ra,
		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
		(void *)(unsigned long)bp->b_buffer_length,
		NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *xfs_buf_trace_buf;
#define XFS_BUF_TRACE_SIZE	4096
#define XB_TRACE(bp, id, data)	\
	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define XB_TRACE(bp, id, data)	do { } while (0)
#endif

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));

/*
 *	Page Region interfaces.
 *
 *	For pages in filesystems where the blocksize is smaller than the
 *	pagesize, we use the page->private field (long) to hold a bitmap
 *	of uptodate regions within the page.
 *
 *	Each such region is "bytes per page / bits per long" bytes long.
 *
 *	NBPPR == number-of-bytes-per-page-region
 *	BTOPR == bytes-to-page-region (rounded up)
 *	BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))
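/*
 * Illustrative values (not used directly by the code): with 4K pages and
 * 64-bit longs, NBPPR works out to 64 bytes and PRSHIFT to 6, so bit N of
 * page->private records whether bytes [N*64, N*64+63] of the page are
 * uptodate.
 */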

STATIC unsigned long
page_region_mask(
	size_t		offset,
	size_t		length)
{
	unsigned long	mask;
	int		first, final;

	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first);
	mask >>= BITS_PER_LONG - (final);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

	return mask;
}

STATIC inline void
set_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);
}

STATIC inline int
test_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	unsigned long	mask = page_region_mask(offset, length);

	return (mask && (page_private(page) & mask) == mask);
}

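/*
 * Note: in this file, set_page_region() is called from the bio completion
 * handler to record the byte range a completed I/O covered, and
 * test_page_region() is consulted during page lookup to check whether a
 * not-fully-uptodate page already has the requested range marked uptodate.
 */
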
/*
 *	Mapping of multi-page buffers into contiguous virtual space
 */

typedef struct a_list {
	void		*vm_addr;
	struct a_list	*next;
} a_list_t;

STATIC a_list_t		*as_free_head;
STATIC int		as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);

/*
 *	Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		vunmap(addr);
	}
}

STATIC void
purge_addresses(void)
{
	a_list_t	*aentry, *old;

	if (as_free_head == NULL)
		return;

	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}

/*
 *	Internal xfs_buf_t object manipulation
 */

STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	init_MUTEX_LOCKED(&bp->b_iodonesema);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	XB_TRACE(bp, "initialize", target);
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages,
			  bp->b_page_count * sizeof(struct page *));
	}
}

/*
 *	Releases the specified buffer.
 *
 *	The modification state of any associated pages is left unchanged.
 *	The buffer must not be on any hash - use xfs_buf_rele instead for
 *	hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & _XBF_PAGE_CACHE) {
		uint		i;

		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
			free_address(bp->b_addr - bp->b_offset);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			ASSERT(!PagePrivate(page));
			page_cache_release(page);
		}
		_xfs_buf_free_pages(bp);
	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
		/*
		 * XXX(hch): bp->b_count_desired might be incorrect (see
		 * xfs_buf_associate_memory for details), but fortunately
		 * the Linux version of kmem_free ignores the len argument..
		 */
		kmem_free(bp->b_addr, bp->b_count_desired);
		_xfs_buf_free_pages(bp);
	}

	xfs_buf_deallocate(bp);
}

/*
 *	Finds all pages for the buffer in question and builds its page list.
 */
STATIC int
_xfs_buf_lookup_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	struct address_space	*mapping = bp->b_target->bt_mapping;
	size_t			blocksize = bp->b_target->bt_bsize;
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	pgoff_t			first;
	xfs_off_t		end;
	int			error;

	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;
	bp->b_flags |= _XBF_PAGE_CACHE;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;

	retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__FUNCTION__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			congestion_wait(WRITE, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

		ASSERT(!PagePrivate(page));
		if (!PageUptodate(page)) {
			page_count--;
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_locked = 1;
			} else if (!PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
		}

		bp->b_pages[i] = page;
		offset = 0;
	}

	if (!bp->b_locked) {
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "lookup_pages", (long)page_count);
	return error;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	/* A single page buffer is always mappable */
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		if (as_list_len > 64)
			purge_addresses();
		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
 *	Looks up, and creates if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.  If other overlapping buffers exist, they are
 *	released before the new buffer is created and locked,
 *	which may imply that this call will block until those buffers
 *	are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	xfs_bufhash_t		*hash;
	xfs_buf_t		*bp, *n;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			/*
			 * If we look at something, bring it to the
			 * front of the list for next time.
			 */
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		new_bp->b_hash = hash;
		list_add(&new_bp->b_hash_list, &hash->bh_list);
	} else {
		XFS_STATS_INC(xb_miss_locked);
	}

	spin_unlock(&hash->bh_lock);
	return new_bp;

found:
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	 */
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
			/* wait for buffer ownership */
			XB_TRACE(bp, "get_lock", 0);
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
			 */
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	} else {
		/* trylock worked */
		XB_SET_OWNER(bp);
	}

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED;
	}
	XB_TRACE(bp, "got_lock", 0);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 *	Assembles a buffer covering the specified range.
 *	Storage in memory for all portions of the buffer will be allocated,
 *	although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get_flags(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp, *new_bp;
	int			error = 0, i;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = _xfs_buf_lookup_pages(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",
					__FUNCTION__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	XB_TRACE(bp, "get", (unsigned long)flags);
	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

xfs_buf_t *
xfs_buf_read_flags(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_flags(target, ioff, isize, flags);
	if (bp) {
		if (!XFS_BUF_ISDONE(bp)) {
			XB_TRACE(bp, "read", (unsigned long)flags);
			XFS_STATS_INC(xb_get_read);
			xfs_buf_iostart(bp, flags);
		} else if (flags & XBF_ASYNC) {
			XB_TRACE(bp, "read_async", (unsigned long)flags);
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			XB_TRACE(bp, "read_done", (unsigned long)flags);
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

/*
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	struct backing_dev_info *bdi;

	bdi = target->bt_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))
		return;

	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
	xfs_buf_read_flags(target, ioff, isize, flags);
}

xfs_buf_t *
xfs_buf_get_empty(
	size_t			len,
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if (((unsigned long)addr < VMALLOC_START) ||
	    ((unsigned long)addr >= VMALLOC_END)) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	size_t			ptr;
	size_t			end, end_cur;
	off_t			offset;
	int			page_count;

	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
	if (offset && (len > PAGE_CACHE_SIZE))
		page_count++;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;
	ptr = (size_t) mem & PAGE_CACHE_MASK;
	end = PAGE_CACHE_ALIGN((size_t) mem + len);
	end_cur = end;
	/* set up first page */
	bp->b_pages[0] = mem_to_page(mem);

	ptr += PAGE_CACHE_SIZE;
	bp->b_page_count = ++i;
	while (ptr < end) {
		bp->b_pages[i] = mem_to_page((void *)ptr);
		bp->b_page_count = ++i;
		ptr += PAGE_CACHE_SIZE;
	}
	bp->b_locked = 0;

	bp->b_count_desired = bp->b_buffer_length = len;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}
760
761xfs_buf_t *
Nathan Scottce8e9222006-01-11 15:39:08 +1100762xfs_buf_get_noaddr(
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763 size_t len,
764 xfs_buftarg_t *target)
765{
766 size_t malloc_len = len;
767 xfs_buf_t *bp;
768 void *data;
769 int error;
770
Nathan Scottce8e9222006-01-11 15:39:08 +1100771 bp = xfs_buf_allocate(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700772 if (unlikely(bp == NULL))
773 goto fail;
Nathan Scottce8e9222006-01-11 15:39:08 +1100774 _xfs_buf_initialize(bp, target, 0, len, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775
776 try_again:
Nathan Scottefb8ad72006-09-28 11:03:05 +1000777 data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778 if (unlikely(data == NULL))
779 goto fail_free_buf;
780
781 /* check whether alignment matches.. */
782 if ((__psunsigned_t)data !=
Nathan Scottce8e9222006-01-11 15:39:08 +1100783 ((__psunsigned_t)data & ~target->bt_smask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700784 /* .. else double the size and try again */
785 kmem_free(data, malloc_len);
786 malloc_len <<= 1;
787 goto try_again;
788 }
789
Nathan Scottce8e9222006-01-11 15:39:08 +1100790 error = xfs_buf_associate_memory(bp, data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791 if (error)
792 goto fail_free_mem;
Nathan Scottce8e9222006-01-11 15:39:08 +1100793 bp->b_flags |= _XBF_KMEM_ALLOC;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700794
Nathan Scottce8e9222006-01-11 15:39:08 +1100795 xfs_buf_unlock(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796
Nathan Scottce8e9222006-01-11 15:39:08 +1100797 XB_TRACE(bp, "no_daddr", data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798 return bp;
799 fail_free_mem:
800 kmem_free(data, malloc_len);
801 fail_free_buf:
Nathan Scottce8e9222006-01-11 15:39:08 +1100802 xfs_buf_free(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803 fail:
804 return NULL;
805}
806
807/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808 * Increment reference count on buffer, to hold the buffer concurrently
809 * with another thread which may release (free) the buffer asynchronously.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 * Must hold the buffer already to call this function.
811 */
812void
Nathan Scottce8e9222006-01-11 15:39:08 +1100813xfs_buf_hold(
814 xfs_buf_t *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700815{
Nathan Scottce8e9222006-01-11 15:39:08 +1100816 atomic_inc(&bp->b_hold);
817 XB_TRACE(bp, "hold", 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700818}
819
820/*
 *	Releases a hold on the specified buffer.  If the
 *	hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	xfs_bufhash_t		*hash = bp->b_hash;

	XB_TRACE(bp, "rele", bp->b_relse);

	if (unlikely(!hash)) {
		ASSERT(!bp->b_relse);
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
		if (bp->b_relse) {
			atomic_inc(&bp->b_hold);
			spin_unlock(&hash->bh_lock);
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
			spin_unlock(&hash->bh_lock);
		} else {
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
			spin_unlock(&hash->bh_lock);
			xfs_buf_free(bp);
		}
	} else {
		/*
		 * Catch reference count leaks
		 */
		ASSERT(atomic_read(&bp->b_hold) >= 0);
	}
}


/*
 *	Mutual exclusion on buffers.  Locking model:
 *
 *	Buffers associated with inodes for which buffer locking
 *	is not enabled are not protected by semaphores, and are
 *	assumed to be exclusively owned by the caller.  There is a
 *	spinlock in the buffer, used by the caller when concurrent
 *	access is possible.
 */

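/*
 * Rough usage sketch (illustrative only): a caller holding a buffer
 * reference takes the lock with xfs_buf_lock(), or attempts it with
 * xfs_buf_cond_lock() which returns -EBUSY instead of sleeping, works on
 * the buffer, and then drops the lock again with xfs_buf_unlock().
 */
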
/*
 *	Locks a buffer object, if it is not already locked.
 *	Note that this in no way locks the underlying pages, so it is only
 *	useful for synchronizing concurrent use of buffer objects, not for
 *	synchronizing independent access to the underlying pages.
 */
int
xfs_buf_cond_lock(
	xfs_buf_t		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
	}
	XB_TRACE(bp, "cond_lock", (long)locked);
	return locked ? 0 : -EBUSY;
}

#if defined(DEBUG) || defined(XFS_BLI_TRACE)
int
xfs_buf_lock_value(
	xfs_buf_t		*bp)
{
	return atomic_read(&bp->b_sema.count);
}
#endif

/*
 *	Locks a buffer object.
 *	Note that this in no way locks the underlying pages, so it is only
 *	useful for synchronizing concurrent use of buffer objects, not for
 *	synchronizing independent access to the underlying pages.
 */
void
xfs_buf_lock(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "lock", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
	XB_TRACE(bp, "locked", 0);
}

/*
 *	Releases the lock on the buffer object.
 *	If the buffer is marked delwri but is not queued, do so before we
 *	unlock the buffer as we need to set flags correctly.  We also need to
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop its reference and does not know we just queued the buffer.
 */
void
xfs_buf_unlock(
	xfs_buf_t		*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);
	XB_TRACE(bp, "unlock", 0);
}


/*
 *	Pinning Buffer Storage in Memory
 *	Ensure that no attempt to force a buffer to disk will succeed.
 */
void
xfs_buf_pin(
	xfs_buf_t		*bp)
{
	atomic_inc(&bp->b_pin_count);
	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}

void
xfs_buf_unpin(
	xfs_buf_t		*bp)
{
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}

int
xfs_buf_ispin(
	xfs_buf_t		*bp)
{
	return atomic_read(&bp->b_pin_count);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
		schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	bp->b_flags &= ~(XBF_READ | XBF_WRITE);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "iodone", bp->b_iodone);

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		up(&bp->b_iodonesema);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	XB_TRACE(bp, "ioerror", (unsigned long)error);
}

/*
 *	Initiate I/O on a buffer, based on the flags supplied.
 *	The b_iodone routine in the buffer supplied will only be called
 *	when all of the subsidiary I/O requests, if any, have been completed.
 */
int
xfs_buf_iostart(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status = 0;

	XB_TRACE(bp, "iostart", (unsigned long)flags);

	if (flags & XBF_DELWRI) {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
		xfs_buf_delwri_queue(bp, 1);
		return status;
	}

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);

	/* For writes allow an alternate strategy routine to precede
	 * the actual I/O request (which may not be issued at all in
	 * a shutdown situation, for example).
	 */
	status = (flags & XBF_WRITE) ?
		xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);

	/* Wait for I/O if we are not an async request.
	 * Note: async I/O request completion will release the buffer,
	 * and that can already be done by this point.  So using the
	 * buffer pointer from here on, after async I/O, is invalid.
	 */
	if (!status && !(flags & XBF_ASYNC))
		status = xfs_buf_iowait(bp);

	return status;
}

STATIC __inline__ int
_xfs_buf_iolocked(
	xfs_buf_t		*bp)
{
	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
	if (bp->b_flags & XBF_READ)
		return bp->b_locked;
	return 0;
}

STATIC __inline__ void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		bp->b_locked = 0;
		xfs_buf_ioend(bp, schedule);
	}
}

STATIC int
xfs_buf_bio_end_io(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		bp->b_error = EIO;

	do {
		struct page	*page = bvec->bv_page;

		ASSERT(!PagePrivate(page));
		if (unlikely(bp->b_error)) {
			if (bp->b_flags & XBF_READ)
				ClearPageUptodate(page);
		} else if (blocksize >= PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(bp->b_flags & _XBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);
		}

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (_xfs_buf_iolocked(bp)) {
			unlock_page(page);
		}
	} while (bvec >= bio->bi_io_vec);

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
	return 0;
}

STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			i, rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	int			locking = _xfs_buf_iolocked(bp);

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
		rw = WRITE_BARRIER;
	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
	} else {
		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
	}

	/* Special code path for reading a sub-page-size buffer --
	 * we populate the whole page, and hence the other metadata
	 * in the same page.  This optimization is only valid when the
	 * filesystem block size is not smaller than the page size.
	 */
Nathan Scottce8e9222006-01-11 15:39:08 +11001183 if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1184 (bp->b_flags & XBF_READ) && locking &&
1185 (blocksize >= PAGE_CACHE_SIZE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 bio = bio_alloc(GFP_NOIO, 1);
1187
Nathan Scottce8e9222006-01-11 15:39:08 +11001188 bio->bi_bdev = bp->b_target->bt_bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189 bio->bi_sector = sector - (offset >> BBSHIFT);
Nathan Scottce8e9222006-01-11 15:39:08 +11001190 bio->bi_end_io = xfs_buf_bio_end_io;
1191 bio->bi_private = bp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192
Nathan Scottce8e9222006-01-11 15:39:08 +11001193 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 size = 0;
1195
Nathan Scottce8e9222006-01-11 15:39:08 +11001196 atomic_inc(&bp->b_io_remaining);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197
1198 goto submit_io;
1199 }
1200
1201 /* Lock down the pages which we need to for the request */
Nathan Scottce8e9222006-01-11 15:39:08 +11001202 if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 for (i = 0; size; i++) {
1204 int nbytes = PAGE_CACHE_SIZE - offset;
Nathan Scottce8e9222006-01-11 15:39:08 +11001205 struct page *page = bp->b_pages[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
1207 if (nbytes > size)
1208 nbytes = size;
1209
1210 lock_page(page);
1211
1212 size -= nbytes;
1213 offset = 0;
1214 }
Nathan Scottce8e9222006-01-11 15:39:08 +11001215 offset = bp->b_offset;
1216 size = bp->b_count_desired;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 }
1218
1219next_chunk:
Nathan Scottce8e9222006-01-11 15:39:08 +11001220 atomic_inc(&bp->b_io_remaining);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1222 if (nr_pages > total_nr_pages)
1223 nr_pages = total_nr_pages;
1224
1225 bio = bio_alloc(GFP_NOIO, nr_pages);
Nathan Scottce8e9222006-01-11 15:39:08 +11001226 bio->bi_bdev = bp->b_target->bt_bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227 bio->bi_sector = sector;
Nathan Scottce8e9222006-01-11 15:39:08 +11001228 bio->bi_end_io = xfs_buf_bio_end_io;
1229 bio->bi_private = bp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230
1231 for (; size && nr_pages; nr_pages--, map_i++) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001232 int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
1234 if (nbytes > size)
1235 nbytes = size;
1236
Nathan Scottce8e9222006-01-11 15:39:08 +11001237 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1238 if (rbytes < nbytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 break;
1240
1241 offset = 0;
1242 sector += nbytes >> BBSHIFT;
1243 size -= nbytes;
1244 total_nr_pages--;
1245 }
1246
1247submit_io:
1248 if (likely(bio->bi_size)) {
1249 submit_bio(rw, bio);
1250 if (size)
1251 goto next_chunk;
1252 } else {
1253 bio_put(bio);
Nathan Scottce8e9222006-01-11 15:39:08 +11001254 xfs_buf_ioerror(bp, EIO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 }
1256}
1257
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258int
Nathan Scottce8e9222006-01-11 15:39:08 +11001259xfs_buf_iorequest(
1260 xfs_buf_t *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261{
Nathan Scottce8e9222006-01-11 15:39:08 +11001262 XB_TRACE(bp, "iorequest", 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263
Nathan Scottce8e9222006-01-11 15:39:08 +11001264 if (bp->b_flags & XBF_DELWRI) {
1265 xfs_buf_delwri_queue(bp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 return 0;
1267 }
1268
Nathan Scottce8e9222006-01-11 15:39:08 +11001269 if (bp->b_flags & XBF_WRITE) {
1270 xfs_buf_wait_unpin(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 }
1272
Nathan Scottce8e9222006-01-11 15:39:08 +11001273 xfs_buf_hold(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274
1275 /* Set the count to 1 initially, this will stop an I/O
1276 * completion callout which happens before we have started
Nathan Scottce8e9222006-01-11 15:39:08 +11001277 * all the I/O from calling xfs_buf_ioend too early.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 */
Nathan Scottce8e9222006-01-11 15:39:08 +11001279 atomic_set(&bp->b_io_remaining, 1);
1280 _xfs_buf_ioapply(bp);
1281 _xfs_buf_ioend(bp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
Nathan Scottce8e9222006-01-11 15:39:08 +11001283 xfs_buf_rele(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 return 0;
1285}
1286
1287/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001288 * Waits for I/O to complete on the buffer supplied.
1289 * It returns immediately if no I/O is pending.
1290 * It returns the I/O error code, if any, or 0 if there was no error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 */
1292int
Nathan Scottce8e9222006-01-11 15:39:08 +11001293xfs_buf_iowait(
1294 xfs_buf_t *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295{
Nathan Scottce8e9222006-01-11 15:39:08 +11001296 XB_TRACE(bp, "iowait", 0);
1297 if (atomic_read(&bp->b_io_remaining))
1298 blk_run_address_space(bp->b_target->bt_mapping);
1299 down(&bp->b_iodonesema);
1300 XB_TRACE(bp, "iowaited", (long)bp->b_error);
1301 return bp->b_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302}
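/*
 * A minimal usage sketch, assuming "bp" is a buffer already set up and
 * locked by the caller; synchronous I/O pairs the two routines above,
 * issuing the request and then waiting for the result (releasing the
 * buffer on error is just one possible disposition):
 *
 *	xfs_buf_iorequest(bp);
 *	error = xfs_buf_iowait(bp);
 *	if (error)
 *		xfs_buf_relse(bp);
 */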
1303
Nathan Scottce8e9222006-01-11 15:39:08 +11001304xfs_caddr_t
1305xfs_buf_offset(
1306 xfs_buf_t *bp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 size_t offset)
1308{
1309 struct page *page;
1310
Nathan Scottce8e9222006-01-11 15:39:08 +11001311 if (bp->b_flags & XBF_MAPPED)
1312 return XFS_BUF_PTR(bp) + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
Nathan Scottce8e9222006-01-11 15:39:08 +11001314 offset += bp->b_offset;
1315 page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1316 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317}
1318
1319/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 * Move data into or out of a buffer.
1321 */
1322void
Nathan Scottce8e9222006-01-11 15:39:08 +11001323xfs_buf_iomove(
1324 xfs_buf_t *bp, /* buffer to process */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 size_t boff, /* starting buffer offset */
1326 size_t bsize, /* length to copy */
1327 caddr_t data, /* data address */
Nathan Scottce8e9222006-01-11 15:39:08 +11001328 xfs_buf_rw_t mode) /* read/write/zero flag */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329{
1330 size_t bend, cpoff, csize;
1331 struct page *page;
1332
1333 bend = boff + bsize;
1334 while (boff < bend) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001335 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1336 cpoff = xfs_buf_poff(boff + bp->b_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 csize = min_t(size_t,
Nathan Scottce8e9222006-01-11 15:39:08 +11001338 PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339
1340 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1341
1342 switch (mode) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001343 case XBRW_ZERO:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 memset(page_address(page) + cpoff, 0, csize);
1345 break;
Nathan Scottce8e9222006-01-11 15:39:08 +11001346 case XBRW_READ:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 memcpy(data, page_address(page) + cpoff, csize);
1348 break;
Nathan Scottce8e9222006-01-11 15:39:08 +11001349 case XBRW_WRITE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 memcpy(page_address(page) + cpoff, data, csize);
1351 }
1352
1353 boff += csize;
1354 data += csize;
1355 }
1356}
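/*
 * Mode sketch for xfs_buf_iomove(), assuming "bp" holds at least 512
 * valid bytes and "data" is a caller-owned scratch area of that size
 * (the data pointer is not used for the zeroing case):
 *
 *	xfs_buf_iomove(bp, 0, 512, data, XBRW_READ);	copy out of the buffer
 *	xfs_buf_iomove(bp, 0, 512, data, XBRW_WRITE);	copy into the buffer
 *	xfs_buf_iomove(bp, 0, 512, NULL, XBRW_ZERO);	zero that range
 */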
1357
1358/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001359 * Handling of buffer targets (buftargs).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 */
1361
1362/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001363 * Wait for any bufs with callbacks that have been submitted but
1364 * have not yet returned... walk the hash list for the target.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 */
1366void
1367xfs_wait_buftarg(
1368 xfs_buftarg_t *btp)
1369{
1370 xfs_buf_t *bp, *n;
1371 xfs_bufhash_t *hash;
1372 uint i;
1373
1374 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1375 hash = &btp->bt_hash[i];
1376again:
1377 spin_lock(&hash->bh_lock);
Nathan Scottce8e9222006-01-11 15:39:08 +11001378 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1379 ASSERT(btp == bp->b_target);
1380 if (!(bp->b_flags & XBF_FS_MANAGED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 spin_unlock(&hash->bh_lock);
David Chinner2f926582005-09-05 08:33:35 +10001382 /*
1383 * Catch superblock reference count leaks
1384 * immediately
1385 */
Nathan Scottce8e9222006-01-11 15:39:08 +11001386 BUG_ON(bp->b_bn == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 delay(100);
1388 goto again;
1389 }
1390 }
1391 spin_unlock(&hash->bh_lock);
1392 }
1393}
1394
1395/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001396 * Allocate buffer hash table for a given target.
1397 * For devices containing metadata (i.e. not the log/realtime devices)
1398 * we need to allocate a much larger hash table.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 */
1400STATIC void
1401xfs_alloc_bufhash(
1402 xfs_buftarg_t *btp,
1403 int external)
1404{
1405 unsigned int i;
1406
1407 btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
1408 btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1409 btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
Vlad Apostolov93c189c2006-11-11 18:03:49 +11001410 sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1412 spin_lock_init(&btp->bt_hash[i].bh_lock);
1413 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1414 }
1415}
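/*
 * Sizing check for the table above: external != 0 (log/realtime device)
 * gives bt_hashshift == 3, i.e. 1 << 3 == 8 buckets and bt_hashmask == 0x7;
 * external == 0 (data device) gives bt_hashshift == 8, i.e. 256 buckets
 * and bt_hashmask == 0xff.
 */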
1416
1417STATIC void
1418xfs_free_bufhash(
1419 xfs_buftarg_t *btp)
1420{
Nathan Scottce8e9222006-01-11 15:39:08 +11001421 kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 btp->bt_hash = NULL;
1423}
1424
David Chinnera6867a62006-01-11 15:37:58 +11001425/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001426 * buftarg list for delwrite queue processing
David Chinnera6867a62006-01-11 15:37:58 +11001427 */
1428STATIC LIST_HEAD(xfs_buftarg_list);
1429STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
1430
1431STATIC void
1432xfs_register_buftarg(
1433 xfs_buftarg_t *btp)
1434{
1435 spin_lock(&xfs_buftarg_lock);
1436 list_add(&btp->bt_list, &xfs_buftarg_list);
1437 spin_unlock(&xfs_buftarg_lock);
1438}
1439
1440STATIC void
1441xfs_unregister_buftarg(
1442 xfs_buftarg_t *btp)
1443{
1444 spin_lock(&xfs_buftarg_lock);
1445 list_del(&btp->bt_list);
1446 spin_unlock(&xfs_buftarg_lock);
1447}
1448
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449void
1450xfs_free_buftarg(
1451 xfs_buftarg_t *btp,
1452 int external)
1453{
1454 xfs_flush_buftarg(btp, 1);
1455 if (external)
Nathan Scottce8e9222006-01-11 15:39:08 +11001456 xfs_blkdev_put(btp->bt_bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 xfs_free_bufhash(btp);
Nathan Scottce8e9222006-01-11 15:39:08 +11001458 iput(btp->bt_mapping->host);
David Chinnera6867a62006-01-11 15:37:58 +11001459
Nathan Scottce8e9222006-01-11 15:39:08 +11001460 /* Unregister the buftarg first so that we don't get a
1461 * wakeup finding a non-existent task
1462 */
David Chinnera6867a62006-01-11 15:37:58 +11001463 xfs_unregister_buftarg(btp);
1464 kthread_stop(btp->bt_task);
1465
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 kmem_free(btp, sizeof(*btp));
1467}
1468
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469STATIC int
1470xfs_setsize_buftarg_flags(
1471 xfs_buftarg_t *btp,
1472 unsigned int blocksize,
1473 unsigned int sectorsize,
1474 int verbose)
1475{
Nathan Scottce8e9222006-01-11 15:39:08 +11001476 btp->bt_bsize = blocksize;
1477 btp->bt_sshift = ffs(sectorsize) - 1;
1478 btp->bt_smask = sectorsize - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479
Nathan Scottce8e9222006-01-11 15:39:08 +11001480 if (set_blocksize(btp->bt_bdev, sectorsize)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 printk(KERN_WARNING
1482 "XFS: Cannot set_blocksize to %u on device %s\n",
1483 sectorsize, XFS_BUFTARG_NAME(btp));
1484 return EINVAL;
1485 }
1486
1487 if (verbose &&
1488 (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1489 printk(KERN_WARNING
1490 "XFS: %u byte sectors in use on device %s. "
1491 "This is suboptimal; %u or greater is ideal.\n",
1492 sectorsize, XFS_BUFTARG_NAME(btp),
1493 (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1494 }
1495
1496 return 0;
1497}
1498
1499/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001500 * When allocating the initial buffer target we have not yet
1501 * read in the superblock, so we don't know what size sectors
1502 * are being used at this early stage. Play safe.
1503 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504STATIC int
1505xfs_setsize_buftarg_early(
1506 xfs_buftarg_t *btp,
1507 struct block_device *bdev)
1508{
1509 return xfs_setsize_buftarg_flags(btp,
1510 PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1511}
1512
1513int
1514xfs_setsize_buftarg(
1515 xfs_buftarg_t *btp,
1516 unsigned int blocksize,
1517 unsigned int sectorsize)
1518{
1519 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1520}
1521
1522STATIC int
1523xfs_mapping_buftarg(
1524 xfs_buftarg_t *btp,
1525 struct block_device *bdev)
1526{
1527 struct backing_dev_info *bdi;
1528 struct inode *inode;
1529 struct address_space *mapping;
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07001530 static const struct address_space_operations mapping_aops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 .sync_page = block_sync_page,
Christoph Lametere965f962006-02-01 03:05:41 -08001532 .migratepage = fail_migrate_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 };
1534
1535 inode = new_inode(bdev->bd_inode->i_sb);
1536 if (!inode) {
1537 printk(KERN_WARNING
1538 "XFS: Cannot allocate mapping inode for device %s\n",
1539 XFS_BUFTARG_NAME(btp));
1540 return ENOMEM;
1541 }
1542 inode->i_mode = S_IFBLK;
1543 inode->i_bdev = bdev;
1544 inode->i_rdev = bdev->bd_dev;
1545 bdi = blk_get_backing_dev_info(bdev);
1546 if (!bdi)
1547 bdi = &default_backing_dev_info;
1548 mapping = &inode->i_data;
1549 mapping->a_ops = &mapping_aops;
1550 mapping->backing_dev_info = bdi;
1551 mapping_set_gfp_mask(mapping, GFP_NOFS);
Nathan Scottce8e9222006-01-11 15:39:08 +11001552 btp->bt_mapping = mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 return 0;
1554}
1555
David Chinnera6867a62006-01-11 15:37:58 +11001556STATIC int
1557xfs_alloc_delwrite_queue(
1558 xfs_buftarg_t *btp)
1559{
1560 int error = 0;
1561
1562 INIT_LIST_HEAD(&btp->bt_list);
1563 INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1564 spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
1565 btp->bt_flags = 0;
1566 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1567 if (IS_ERR(btp->bt_task)) {
1568 error = PTR_ERR(btp->bt_task);
1569 goto out_error;
1570 }
1571 xfs_register_buftarg(btp);
1572out_error:
1573 return error;
1574}
1575
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576xfs_buftarg_t *
1577xfs_alloc_buftarg(
1578 struct block_device *bdev,
1579 int external)
1580{
1581 xfs_buftarg_t *btp;
1582
1583 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1584
Nathan Scottce8e9222006-01-11 15:39:08 +11001585 btp->bt_dev = bdev->bd_dev;
1586 btp->bt_bdev = bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 if (xfs_setsize_buftarg_early(btp, bdev))
1588 goto error;
1589 if (xfs_mapping_buftarg(btp, bdev))
1590 goto error;
David Chinnera6867a62006-01-11 15:37:58 +11001591 if (xfs_alloc_delwrite_queue(btp))
1592 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 xfs_alloc_bufhash(btp, external);
1594 return btp;
1595
1596error:
1597 kmem_free(btp, sizeof(*btp));
1598 return NULL;
1599}
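/*
 * Lifecycle sketch, assuming "bdev" is an already-opened block device and
 * the caller treats a NULL return as an allocation/setup failure:
 *
 *	btp = xfs_alloc_buftarg(bdev, 1);
 *	if (!btp)
 *		return ENOMEM;
 *	... use btp for buffer cache I/O against bdev ...
 *	xfs_free_buftarg(btp, 1);
 */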
1600
1601
1602/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001603 * Delayed write buffer handling
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605STATIC void
Nathan Scottce8e9222006-01-11 15:39:08 +11001606xfs_buf_delwri_queue(
1607 xfs_buf_t *bp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 int unlock)
1609{
Nathan Scottce8e9222006-01-11 15:39:08 +11001610 struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
1611 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
David Chinnera6867a62006-01-11 15:37:58 +11001612
Nathan Scottce8e9222006-01-11 15:39:08 +11001613 XB_TRACE(bp, "delwri_q", (long)unlock);
1614 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
David Chinnera6867a62006-01-11 15:37:58 +11001616 spin_lock(dwlk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 /* If already in the queue, dequeue and place at tail */
Nathan Scottce8e9222006-01-11 15:39:08 +11001618 if (!list_empty(&bp->b_list)) {
1619 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1620 if (unlock)
1621 atomic_dec(&bp->b_hold);
1622 list_del(&bp->b_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 }
1624
Nathan Scottce8e9222006-01-11 15:39:08 +11001625 bp->b_flags |= _XBF_DELWRI_Q;
1626 list_add_tail(&bp->b_list, dwq);
1627 bp->b_queuetime = jiffies;
David Chinnera6867a62006-01-11 15:37:58 +11001628 spin_unlock(dwlk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629
1630 if (unlock)
Nathan Scottce8e9222006-01-11 15:39:08 +11001631 xfs_buf_unlock(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632}
1633
1634void
Nathan Scottce8e9222006-01-11 15:39:08 +11001635xfs_buf_delwri_dequeue(
1636 xfs_buf_t *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637{
Nathan Scottce8e9222006-01-11 15:39:08 +11001638 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 int dequeued = 0;
1640
David Chinnera6867a62006-01-11 15:37:58 +11001641 spin_lock(dwlk);
Nathan Scottce8e9222006-01-11 15:39:08 +11001642 if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1643 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1644 list_del_init(&bp->b_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 dequeued = 1;
1646 }
Nathan Scottce8e9222006-01-11 15:39:08 +11001647 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
David Chinnera6867a62006-01-11 15:37:58 +11001648 spin_unlock(dwlk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
1650 if (dequeued)
Nathan Scottce8e9222006-01-11 15:39:08 +11001651 xfs_buf_rele(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652
Nathan Scottce8e9222006-01-11 15:39:08 +11001653 XB_TRACE(bp, "delwri_dq", (long)dequeued);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654}
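/*
 * Flow sketch, assuming "bp" is a locked buffer the caller wants written
 * lazily: marking it delayed-write and asynchronous before issuing the
 * request diverts it onto the per-target delwri queue above instead of
 * starting I/O immediately:
 *
 *	bp->b_flags |= XBF_DELWRI | XBF_ASYNC;
 *	xfs_buf_iorequest(bp);
 *
 * xfsbufd later pushes entries once they age, and xfs_buf_delwri_dequeue()
 * is the matching removal path.
 */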
1655
1656STATIC void
Nathan Scottce8e9222006-01-11 15:39:08 +11001657xfs_buf_runall_queues(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 struct workqueue_struct *queue)
1659{
1660 flush_workqueue(queue);
1661}
1662
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663STATIC int
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001664xfsbufd_wakeup(
Nathan Scott15c84a42005-11-04 10:51:01 +11001665 int priority,
1666 gfp_t mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667{
Christoph Hellwigda7f93e2006-01-11 20:49:57 +11001668 xfs_buftarg_t *btp;
David Chinnera6867a62006-01-11 15:37:58 +11001669
1670 spin_lock(&xfs_buftarg_lock);
Christoph Hellwigda7f93e2006-01-11 20:49:57 +11001671 list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001672 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
David Chinnera6867a62006-01-11 15:37:58 +11001673 continue;
Nathan Scottce8e9222006-01-11 15:39:08 +11001674 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
David Chinnera6867a62006-01-11 15:37:58 +11001675 wake_up_process(btp->bt_task);
1676 }
1677 spin_unlock(&xfs_buftarg_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 return 0;
1679}
1680
1681STATIC int
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001682xfsbufd(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 void *data)
1684{
1685 struct list_head tmp;
1686 unsigned long age;
David Chinnera6867a62006-01-11 15:37:58 +11001687 xfs_buftarg_t *target = (xfs_buftarg_t *)data;
Nathan Scottce8e9222006-01-11 15:39:08 +11001688 xfs_buf_t *bp, *n;
David Chinnera6867a62006-01-11 15:37:58 +11001689 struct list_head *dwq = &target->bt_delwrite_queue;
1690 spinlock_t *dwlk = &target->bt_delwrite_lock;
Nathan Scottf07c2252006-09-28 10:52:15 +10001691 int count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 current->flags |= PF_MEMALLOC;
1694
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 INIT_LIST_HEAD(&tmp);
1696 do {
Christoph Lameter3e1d1d22005-06-24 23:13:50 -07001697 if (unlikely(freezing(current))) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001698 set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
Christoph Lameter3e1d1d22005-06-24 23:13:50 -07001699 refrigerator();
Nathan Scottabd0cf72005-05-05 13:30:13 -07001700 } else {
Nathan Scottce8e9222006-01-11 15:39:08 +11001701 clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
Nathan Scottabd0cf72005-05-05 13:30:13 -07001702 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Nathan Scott15c84a42005-11-04 10:51:01 +11001704 schedule_timeout_interruptible(
1705 xfs_buf_timer_centisecs * msecs_to_jiffies(10));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
Nathan Scottf07c2252006-09-28 10:52:15 +10001707 count = 0;
Nishanth Aravamudan041e0e32005-09-10 00:27:23 -07001708 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
David Chinnera6867a62006-01-11 15:37:58 +11001709 spin_lock(dwlk);
Nathan Scottce8e9222006-01-11 15:39:08 +11001710 list_for_each_entry_safe(bp, n, dwq, b_list) {
1711 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1712 ASSERT(bp->b_flags & XBF_DELWRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713
Nathan Scottce8e9222006-01-11 15:39:08 +11001714 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1715 if (!test_bit(XBT_FORCE_FLUSH,
David Chinnera6867a62006-01-11 15:37:58 +11001716 &target->bt_flags) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 time_before(jiffies,
Nathan Scottce8e9222006-01-11 15:39:08 +11001718 bp->b_queuetime + age)) {
1719 xfs_buf_unlock(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 break;
1721 }
1722
Nathan Scottf07c2252006-09-28 10:52:15 +10001723 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1724 _XBF_RUN_QUEUES);
Nathan Scottce8e9222006-01-11 15:39:08 +11001725 bp->b_flags |= XBF_WRITE;
Nathan Scottf07c2252006-09-28 10:52:15 +10001726 list_move_tail(&bp->b_list, &tmp);
1727 count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 }
1729 }
David Chinnera6867a62006-01-11 15:37:58 +11001730 spin_unlock(dwlk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
1732 while (!list_empty(&tmp)) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001733 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1734 ASSERT(target == bp->b_target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735
Nathan Scottce8e9222006-01-11 15:39:08 +11001736 list_del_init(&bp->b_list);
1737 xfs_buf_iostrategy(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 }
1739
1740 if (as_list_len > 0)
1741 purge_addresses();
Nathan Scottf07c2252006-09-28 10:52:15 +10001742 if (count)
1743 blk_run_address_space(target->bt_mapping);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
Nathan Scottce8e9222006-01-11 15:39:08 +11001745 clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
Christoph Hellwig4df08c52005-09-05 08:34:18 +10001746 } while (!kthread_should_stop());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
Christoph Hellwig4df08c52005-09-05 08:34:18 +10001748 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749}
1750
1751/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001752 * Go through all incore buffers, and release buffers if they belong to
1753 * the given device. This is used in filesystem error handling to
1754 * preserve the consistency of its metadata.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 */
1756int
1757xfs_flush_buftarg(
1758 xfs_buftarg_t *target,
1759 int wait)
1760{
1761 struct list_head tmp;
Nathan Scottce8e9222006-01-11 15:39:08 +11001762 xfs_buf_t *bp, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 int pincount = 0;
David Chinnera6867a62006-01-11 15:37:58 +11001764 struct list_head *dwq = &target->bt_delwrite_queue;
1765 spinlock_t *dwlk = &target->bt_delwrite_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
Nathan Scottce8e9222006-01-11 15:39:08 +11001767 xfs_buf_runall_queues(xfsdatad_workqueue);
1768 xfs_buf_runall_queues(xfslogd_workqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
1770 INIT_LIST_HEAD(&tmp);
David Chinnera6867a62006-01-11 15:37:58 +11001771 spin_lock(dwlk);
Nathan Scottce8e9222006-01-11 15:39:08 +11001772 list_for_each_entry_safe(bp, n, dwq, b_list) {
1773 ASSERT(bp->b_target == target);
1774 ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
1775 XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
1776 if (xfs_buf_ispin(bp)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 pincount++;
1778 continue;
1779 }
1780
Nathan Scottf07c2252006-09-28 10:52:15 +10001781 list_move_tail(&bp->b_list, &tmp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 }
David Chinnera6867a62006-01-11 15:37:58 +11001783 spin_unlock(dwlk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
1785 /*
1786	 * Dropped the delayed write list lock; now walk the temporary list
1787 */
Nathan Scottce8e9222006-01-11 15:39:08 +11001788 list_for_each_entry_safe(bp, n, &tmp, b_list) {
1789 xfs_buf_lock(bp);
Nathan Scottf07c2252006-09-28 10:52:15 +10001790 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
Nathan Scottce8e9222006-01-11 15:39:08 +11001791 bp->b_flags |= XBF_WRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 if (wait)
Nathan Scottce8e9222006-01-11 15:39:08 +11001793 bp->b_flags &= ~XBF_ASYNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 else
Nathan Scottce8e9222006-01-11 15:39:08 +11001795 list_del_init(&bp->b_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
Nathan Scottce8e9222006-01-11 15:39:08 +11001797 xfs_buf_iostrategy(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 }
1799
Nathan Scottf07c2252006-09-28 10:52:15 +10001800 if (wait)
1801 blk_run_address_space(target->bt_mapping);
1802
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 /*
1804 * Remaining list items must be flushed before returning
1805 */
1806 while (!list_empty(&tmp)) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001807 bp = list_entry(tmp.next, xfs_buf_t, b_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
Nathan Scottce8e9222006-01-11 15:39:08 +11001809 list_del_init(&bp->b_list);
1810 xfs_iowait(bp);
1811 xfs_buf_relse(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 }
1813
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 return pincount;
1815}
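/*
 * Usage sketch: xfs_flush_buftarg(target, 1) pushes every unpinned
 * delayed-write buffer queued against "target" and, with wait != 0, waits
 * for that I/O; the return value is the number of pinned buffers that were
 * skipped, so a caller that has to drain the target completely might
 * (illustratively) retry until it returns zero:
 *
 *	while (xfs_flush_buftarg(btp, 1))
 *		delay(100);
 */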
1816
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001817int __init
Nathan Scottce8e9222006-01-11 15:39:08 +11001818xfs_buf_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819{
Nathan Scottce8e9222006-01-11 15:39:08 +11001820#ifdef XFS_BUF_TRACE
1821 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001822#endif
1823
Nathan Scott87582802006-03-14 13:18:19 +11001824 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1825 KM_ZONE_HWALIGN, NULL);
Nathan Scottce8e9222006-01-11 15:39:08 +11001826 if (!xfs_buf_zone)
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001827 goto out_free_trace_buf;
1828
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001829 xfslogd_workqueue = create_workqueue("xfslogd");
1830 if (!xfslogd_workqueue)
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001831 goto out_free_buf_zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001833 xfsdatad_workqueue = create_workqueue("xfsdatad");
1834 if (!xfsdatad_workqueue)
1835 goto out_destroy_xfslogd_workqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836
Nathan Scottce8e9222006-01-11 15:39:08 +11001837 xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
1838 if (!xfs_buf_shake)
David Chinnera6867a62006-01-11 15:37:58 +11001839 goto out_destroy_xfsdatad_workqueue;
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001840
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001841 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001843 out_destroy_xfsdatad_workqueue:
1844 destroy_workqueue(xfsdatad_workqueue);
1845 out_destroy_xfslogd_workqueue:
1846 destroy_workqueue(xfslogd_workqueue);
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001847 out_free_buf_zone:
Nathan Scottce8e9222006-01-11 15:39:08 +11001848 kmem_zone_destroy(xfs_buf_zone);
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001849 out_free_trace_buf:
Nathan Scottce8e9222006-01-11 15:39:08 +11001850#ifdef XFS_BUF_TRACE
1851 ktrace_free(xfs_buf_trace_buf);
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001852#endif
Nathan Scott87582802006-03-14 13:18:19 +11001853 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854}
1855
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856void
Nathan Scottce8e9222006-01-11 15:39:08 +11001857xfs_buf_terminate(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858{
Nathan Scottce8e9222006-01-11 15:39:08 +11001859 kmem_shake_deregister(xfs_buf_shake);
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001860 destroy_workqueue(xfsdatad_workqueue);
1861 destroy_workqueue(xfslogd_workqueue);
Nathan Scottce8e9222006-01-11 15:39:08 +11001862 kmem_zone_destroy(xfs_buf_zone);
1863#ifdef XFS_BUF_TRACE
1864 ktrace_free(xfs_buf_trace_buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866}
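/*
 * Pairing sketch: initialisation code is expected to call xfs_buf_init()
 * once at start-up and xfs_buf_terminate() on the corresponding teardown
 * path (the surrounding module glue is not shown in this file):
 *
 *	error = xfs_buf_init();
 *	if (error)
 *		return error;
 *	...
 *	xfs_buf_terminate();
 */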