/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

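/*
 * Illustrative note (not part of the original source): xb_to_gfp() above
 * maps buffer flags to page allocation modes. For example, a readahead
 * allocation with XBF_READ_AHEAD set evaluates to
 * (__GFP_NORETRY | __GFP_NOWARN), i.e. fail fast and quietly, while a
 * transaction-context allocation with XBF_DONT_BLOCK set evaluates to
 * (GFP_NOFS | __GFP_NOWARN) so that memory reclaim cannot recurse into
 * the filesystem.
 */
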
static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
	 * code is clever enough to know it doesn't have to map a single page,
	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
	 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are no
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
 * there to optimise the shrinker removing the buffer from the LRU and
 * calling xfs_buf_free(), i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	bp->b_flags |= XBF_STALE;
	xfs_buf_delwri_dequeue(bp);
	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}

struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;

	bp = kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags));
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t	*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	if (bp->b_buffer_length < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
								PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode.  The buffer is returned
 * locked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
				xfs_daddr_to_agno(btp->bt_mount, ioff));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (range_base < bp->b_file_offset)
			rbp = &(*rbp)->rb_left;
		else if (range_base > bp->b_file_offset)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block offset match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_buffer_length != range_length) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, ioff, isize, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = xfs_buf_alloc(target, ioff << BBSHIFT, isize << BBSHIFT,
			       flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (!bp) {
		kmem_zone_free(xfs_buf_zone, new_bp);
		return NULL;
	}

	if (bp == new_bp) {
		error = xfs_buf_allocate_memory(bp, flags);
		if (error)
			goto no_buffer;
	} else
		kmem_zone_free(xfs_buf_zone, new_bp);

	/*
	 * Now we have a workable buffer, fill in the block number so
	 * that we can do IO on it.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

found:
	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;

no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	status = xfs_buf_iorequest(bp);
	if (status || bp->b_error || (flags & XBF_ASYNC))
		return status;
	return xfs_buf_iowait(bp);
}

xfs_buf_t *
xfs_buf_read(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get(target, ioff, isize, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

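/*
 * Illustrative sketch (not part of the original source; compiled out): a
 * typical cached read through the API above. The xfs_example_* name is
 * hypothetical; real callers also check bp->b_error before trusting the
 * contents, as done here.
 */
#if 0
STATIC int
xfs_example_read_block(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,	/* offset in basic blocks */
	size_t			isize)	/* length in basic blocks */
{
	xfs_buf_t		*bp;
	int			error;

	bp = xfs_buf_read(target, ioff, isize, XBF_LOCK);
	if (!bp)
		return ENOMEM;

	error = bp->b_error;
	if (!error) {
		/* use the contents via bp->b_addr (valid when XBF_MAPPED) */
	}
	xfs_buf_relse(bp);	/* unlock and drop our reference */
	return error;
}
#endif
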
/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read(target, ioff, isize,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			length,
	int			flags)
{
	xfs_buf_t		*bp;
	int			error;

	bp = xfs_buf_get_uncached(target, length, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	XFS_BUF_SET_ADDR(bp, daddr);
	XFS_BUF_READ(bp);

	xfsbdstrat(mp, bp);
	error = xfs_buf_iowait(bp);
	if (error || bp->b_error) {
		xfs_buf_relse(bp);
		return NULL;
	}
	return bp;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			len)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_file_offset = 0;
	bp->b_buffer_length = bp->b_count_desired = len;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_flags &= ~XBF_MAPPED;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			len,
	int			flags)
{
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
	xfs_buf_t		*bp;

	bp = xfs_buf_alloc(target, 0, len, 0);
	if (unlikely(bp == NULL))
		goto fail;

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

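/*
 * Illustrative sketch (not part of the original source; compiled out):
 * pointing a buffer at caller-owned memory and reading into it, loosely
 * modelled on how log recovery combines xfs_buf_associate_memory() with a
 * direct xfsbdstrat()/xfs_buf_iowait() pair. The xfs_example_* name is
 * hypothetical.
 */
#if 0
STATIC int
xfs_example_read_into(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,	/* e.g. from xfs_buf_get_uncached() */
	xfs_daddr_t		daddr,
	void			*mem,	/* caller-owned memory */
	size_t			len)
{
	int			error;

	/* repoint the buffer's page list at the caller's memory */
	error = xfs_buf_associate_memory(bp, mem, len);
	if (error)
		return error;

	XFS_BUF_SET_ADDR(bp, daddr);
	XFS_BUF_READ(bp);
	xfsbdstrat(mp, bp);
	return xfs_buf_iowait(bp);
}
#endif
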
/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Releases a hold on the specified buffer. If the
 * hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
		    atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}

/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

/*
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly. We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d buf count %zd",
		(__uint64_t)XFS_BUF_ADDR(bp), func,
		bp->b_error, XFS_BUF_COUNT(bp));
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);

	xfs_buf_delwri_dequeue(bp);
	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

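/*
 * Illustrative sketch (not part of the original source; compiled out): a
 * synchronous read-modify-write cycle through xfs_bwrite(). The
 * xfs_example_* name is hypothetical; real metadata updates normally go
 * through a transaction rather than writing buffers directly.
 */
#if 0
STATIC int
xfs_example_sync_update(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize)
{
	xfs_buf_t		*bp;
	int			error;

	bp = xfs_buf_read(target, ioff, isize, XBF_LOCK);
	if (!bp)
		return ENOMEM;

	/* ... modify the buffer contents through bp->b_addr ... */

	error = xfs_bwrite(bp);	/* waits; shuts the fs down on error */
	xfs_buf_relse(bp);
	return error;
}
#endif
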
/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDONE(bp);
	xfs_buf_stale(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_DONE(bp);
	xfs_buf_stale(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}


/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem. Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}

STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	xfs_buf_ioerror(bp, -error);

	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

1179STATIC void
Nathan Scottce8e9222006-01-11 15:39:08 +11001180_xfs_buf_ioapply(
1181 xfs_buf_t *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182{
Christoph Hellwiga9759f22007-12-07 14:07:08 +11001183 int rw, map_i, total_nr_pages, nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 struct bio *bio;
Nathan Scottce8e9222006-01-11 15:39:08 +11001185 int offset = bp->b_offset;
1186 int size = bp->b_count_desired;
1187 sector_t sector = bp->b_bn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188
Nathan Scottce8e9222006-01-11 15:39:08 +11001189 total_nr_pages = bp->b_page_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 map_i = 0;
1191
Christoph Hellwig1d5ae5d2011-07-08 14:36:32 +02001192 if (bp->b_flags & XBF_WRITE) {
1193 if (bp->b_flags & XBF_SYNCIO)
1194 rw = WRITE_SYNC;
1195 else
1196 rw = WRITE;
1197 if (bp->b_flags & XBF_FUA)
1198 rw |= REQ_FUA;
1199 if (bp->b_flags & XBF_FLUSH)
1200 rw |= REQ_FLUSH;
1201 } else if (bp->b_flags & XBF_READ_AHEAD) {
1202 rw = READA;
Nathan Scott51bdd702006-09-28 11:01:57 +10001203 } else {
Christoph Hellwig1d5ae5d2011-07-08 14:36:32 +02001204 rw = READ;
Christoph Hellwigf538d4d2005-11-02 10:26:59 +11001205 }
1206
Christoph Hellwig34951f52011-07-26 15:06:44 +00001207 /* we only use the buffer cache for meta-data */
1208 rw |= REQ_META;
1209
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210next_chunk:
Nathan Scottce8e9222006-01-11 15:39:08 +11001211 atomic_inc(&bp->b_io_remaining);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1213 if (nr_pages > total_nr_pages)
1214 nr_pages = total_nr_pages;
1215
1216 bio = bio_alloc(GFP_NOIO, nr_pages);
Nathan Scottce8e9222006-01-11 15:39:08 +11001217 bio->bi_bdev = bp->b_target->bt_bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 bio->bi_sector = sector;
Nathan Scottce8e9222006-01-11 15:39:08 +11001219 bio->bi_end_io = xfs_buf_bio_end_io;
1220 bio->bi_private = bp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221
Dave Chinner0e6e8472011-03-26 09:16:45 +11001222
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 for (; size && nr_pages; nr_pages--, map_i++) {
Dave Chinner0e6e8472011-03-26 09:16:45 +11001224 int rbytes, nbytes = PAGE_SIZE - offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225
1226 if (nbytes > size)
1227 nbytes = size;
1228
Nathan Scottce8e9222006-01-11 15:39:08 +11001229 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1230 if (rbytes < nbytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 break;
1232
1233 offset = 0;
1234 sector += nbytes >> BBSHIFT;
1235 size -= nbytes;
1236 total_nr_pages--;
1237 }
1238
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 if (likely(bio->bi_size)) {
James Bottomley73c77e22010-01-25 11:42:24 -06001240 if (xfs_buf_is_vmapped(bp)) {
1241 flush_kernel_vmap_range(bp->b_addr,
1242 xfs_buf_vmap_len(bp));
1243 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244 submit_bio(rw, bio);
1245 if (size)
1246 goto next_chunk;
1247 } else {
Nathan Scottce8e9222006-01-11 15:39:08 +11001248 xfs_buf_ioerror(bp, EIO);
Dave Chinnerec53d1d2010-07-20 17:52:59 +10001249 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 }
1251}
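
/*
 * A note on the chunking above (illustrative, assuming 4096-byte pages
 * and 512-byte basic blocks, i.e. PAGE_SHIFT == 12 and BBSHIFT == 9):
 *
 *	BIO_MAX_SECTORS == BIO_MAX_PAGES << (PAGE_SHIFT - 9)
 *	nr_pages = BIO_MAX_SECTORS >> (12 - 9) == BIO_MAX_PAGES (256)
 *
 * so a buffer larger than one bio's worth of pages is submitted as
 * multiple bios, and each trip through next_chunk bumps b_io_remaining
 * so completion cannot be signalled until every chunk has finished.
 */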

int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & XBF_DELWRI));

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, so that an I/O completion that
	 * fires before we have submitted all of the I/O cannot call
	 * xfs_buf_ioend() too early. The matching _xfs_buf_ioend() below
	 * drops this initial reference once submission is complete.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}
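
/*
 * Illustrative timeline of the b_io_remaining protocol above, for a
 * buffer that _xfs_buf_ioapply() splits into two bios:
 *
 *	xfs_buf_iorequest:  atomic_set      -> 1
 *	_xfs_buf_ioapply:   +1 per bio      -> 3 with both bios in flight
 *	bio 1 completes:    _xfs_buf_ioend  -> 2
 *	bio 2 completes:    _xfs_buf_ioend  -> 1
 *	xfs_buf_iorequest:  _xfs_buf_ioend  -> 0, xfs_buf_ioend() runs once
 *
 * The events may interleave differently; whichever context performs the
 * final decrement - submitter or bio completion - triggers xfs_buf_ioend(),
 * which is why both paths go through _xfs_buf_ioend().
 */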

/*
 * Waits for I/O to complete on the buffer supplied.  It returns
 * immediately if no I/O is pending.  It returns the I/O error code,
 * if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}
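
/*
 * A sketch of the typical synchronous pattern built from the two calls
 * above (this is the shape used by callers such as xfs_buf_read(), not
 * a new interface):
 *
 *	bp->b_flags |= XBF_READ;
 *	xfs_buf_iorequest(bp);
 *	error = xfs_buf_iowait(bp);
 *
 * Asynchronous callers simply skip the xfs_buf_iowait() step and pick
 * up b_error from the completion path instead.
 */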

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}
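
/*
 * Worked example for the unmapped branch above (assuming PAGE_SIZE ==
 * 4096 and bp->b_offset == 512): a request for offset 7680 becomes 8192
 * after adding b_offset, which selects b_pages[2] (8192 >> PAGE_SHIFT)
 * at intra-page offset 0 (8192 & (PAGE_SIZE-1)).
 */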

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_SIZE - cpoff, bp->b_count_desired - boff);

		ASSERT(((csize + cpoff) <= PAGE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
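
/*
 * Example (a sketch, not a call site from this file): zeroing a range
 * goes through XBRW_ZERO with no data pointer,
 *
 *	xfs_buf_iomove(bp, boff, bsize, NULL, XBRW_ZERO);
 *
 * while XBRW_READ/XBRW_WRITE copy between @data and the backing pages
 * one csize-sized chunk at a time.
 */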

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}

int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	int			nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}
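
/*
 * Worked example of the LRU aging above (illustrative): a buffer on
 * bt_lru with b_lru_ref == 2 survives two shrinker passes - each pass
 * decrements the count and rotates the buffer to the tail of the list -
 * and is only moved to the dispose list on the third pass, once
 * atomic_add_unless() finds the count already at zero.
 */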

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	xfs_flush_buftarg(btp, 1);
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kthread_stop(btp->bt_task);
	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
			sectorsize, xfs_buf_target_name(btp));
		return EINVAL;
	}

	return 0;
}
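
/*
 * Example of the shift/mask setup above for a 512-byte sector device:
 * ffs(512) == 10, so bt_sshift becomes 9 and bt_smask becomes 0x1ff;
 * an offset is then sector aligned iff (offset & btp->bt_smask) == 0.
 */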

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so we don't know what size sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_alloc_delwri_queue(
	xfs_buftarg_t		*btp,
	const char		*fsname)
{
	INIT_LIST_HEAD(&btp->bt_delwri_queue);
	spin_lock_init(&btp->bt_delwri_lock);
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
	if (IS_ERR(btp->bt_task))
		return PTR_ERR(btp->bt_task);
	return 0;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_alloc_delwri_queue(btp, fsname))
		goto error;
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}

/*
 * Delayed write buffer handling
 */
void
xfs_buf_delwri_queue(
	xfs_buf_t		*bp)
{
	struct xfs_buftarg	*btp = bp->b_target;

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & XBF_READ));

	spin_lock(&btp->bt_delwri_lock);
	if (!list_empty(&bp->b_list)) {
		/* if already in the queue, move it to the tail */
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_move_tail(&bp->b_list, &btp->bt_delwri_queue);
	} else {
		/* start xfsbufd as it is about to have something to do */
		if (list_empty(&btp->bt_delwri_queue))
			wake_up_process(bp->b_target->bt_task);

		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_DELWRI | _XBF_DELWRI_Q | XBF_ASYNC;
		list_add_tail(&bp->b_list, &btp->bt_delwri_queue);
	}
	bp->b_queuetime = jiffies;
	spin_unlock(&btp->bt_delwri_lock);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
{
	int			dequeued = 0;

	spin_lock(&bp->b_target->bt_delwri_lock);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(&bp->b_target->bt_delwri_lock);

	if (dequeued)
		xfs_buf_rele(bp);

	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}

/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
	struct xfs_buf		*bp)
{
	struct xfs_buftarg	*btp = bp->b_target;
	long			age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	ASSERT(bp->b_flags & XBF_DELWRI);
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	/*
	 * Check the buffer age before locking the delayed write queue as we
	 * don't need to promote buffers that are already past the flush age.
	 */
	if (bp->b_queuetime < jiffies - age)
		return;
	bp->b_queuetime = jiffies - age;
	spin_lock(&btp->bt_delwri_lock);
	list_move(&bp->b_list, &btp->bt_delwri_queue);
	spin_unlock(&btp->bt_delwri_lock);
}
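
/*
 * Worked example (illustrative, assuming HZ == 100 and the default
 * xfs_buf_age_centisecs of 1500, i.e. 15 seconds): age evaluates to
 * 1500 * msecs_to_jiffies(10) + 1 == 1501 jiffies. Rewinding
 * b_queuetime to jiffies - 1501 makes the time_before() check in
 * xfs_buf_delwri_split() fail, so the next xfsbufd pass writes the
 * buffer out immediately.
 */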

STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

/*
 * Move as many buffers as specified to the supplied list, indicating
 * via the return value if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t		*target,
	struct list_head	*list,
	unsigned long		age)
{
	xfs_buf_t		*bp, *n;
	int			skipped = 0;
	int			force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(&target->bt_delwri_lock);
	list_for_each_entry_safe(bp, n, &target->bt_delwri_queue, b_list) {
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
			trace_xfs_buf_delwri_split(bp, _RET_IP_);
		} else
			skipped++;
	}

	spin_unlock(&target->bt_delwri_lock);
	return skipped;
}

/*
 * Compare function is more complex than it needs to be because the
 * return value is only 32 bits and we are doing comparisons on 64-bit
 * values; see the overflow example below.
 */
static int
xfs_buf_cmp(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	struct xfs_buf		*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf		*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_bn - bp->b_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}
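
/*
 * A sketch of the overflow guarded against above: with
 * ap->b_bn == 0x100000000 and bp->b_bn == 0, a naive
 * "return ap->b_bn - bp->b_bn" truncates the 64-bit difference to a
 * 32-bit 0, wrongly reporting the buffers as equal and breaking the
 * sort order.
 */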

STATIC int
xfsbufd(
	void			*data)
{
	xfs_buftarg_t		*target = (xfs_buftarg_t *)data;

	current->flags |= PF_MEMALLOC;

	set_freezable();

	do {
		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
		struct list_head tmp;
		struct blk_plug plug;

		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		/* sleep for a long time if there is nothing to do. */
		if (list_empty(&target->bt_delwri_queue))
			tout = MAX_SCHEDULE_TIMEOUT;
		schedule_timeout_interruptible(tout);

		xfs_buf_delwri_split(target, &tmp, age);
		list_sort(NULL, &tmp, xfs_buf_cmp);

		blk_start_plug(&plug);
		while (!list_empty(&tmp)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
			list_del_init(&bp->b_list);
			xfs_bdstrat_cb(bp);
		}
		blk_finish_plug(&plug);
	} while (!kthread_should_stop());

	return 0;
}

/*
 * Write out all incore delayed write buffers that belong to the given
 * target. This is used in filesystem error handling to preserve the
 * consistency of the filesystem's metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t		*target,
	int			wait)
{
	xfs_buf_t		*bp;
	int			pincount = 0;
	LIST_HEAD(tmp_list);
	LIST_HEAD(wait_list);
	struct blk_plug		plug;

	xfs_buf_runall_queues(xfsconvertd_workqueue);
	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list.
	 * All I/O is issued async, and if we need to wait for completion
	 * we do that after issuing all the I/O.
	 */
	list_sort(NULL, &tmp_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	while (!list_empty(&tmp_list)) {
		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
		ASSERT(target == bp->b_target);
		list_del_init(&bp->b_list);
		if (wait) {
			bp->b_flags &= ~XBF_ASYNC;
			list_add(&bp->b_list, &wait_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	if (wait) {
		/* Wait for IO to complete. */
		while (!list_empty(&wait_list)) {
			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

			list_del_init(&bp->b_list);
			xfs_buf_iowait(bp);
			xfs_buf_relse(bp);
		}
	}

	return pincount;
}
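
/*
 * Usage sketch (illustrative; the real call sites live elsewhere in
 * XFS): quiesce and unmount paths force out the data device with a
 * blocking flush along the lines of
 *
 *	xfs_flush_buftarg(mp->m_ddev_targp, 1);
 *
 * whereas xfsbufd only ever pushes aged buffers asynchronously.
 */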

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
						WQ_MEM_RECLAIM, 1);
	if (!xfsconvertd_workqueue)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfsconvertd_workqueue);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}