/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"

static kmem_zone_t *xfs_buf_zone;

static struct workqueue_struct *xfslogd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check has
	 * to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	spin_lock(&bp->b_lock);
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

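/*
 * Set up the buffer map array for map_count discontiguous ranges. A
 * single-map buffer uses the map embedded in the xfs_buf itself; larger
 * counts allocate an external array.
 */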
static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return ENOMEM;
	return 0;
}

/*
 * Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

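/*
 * Allocate an xfs_buf for the given target and block maps and initialise
 * its locks, lists and map array. The buffer is returned with a single
 * hold and locked; no memory is allocated for its data pages yet.
 */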
struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}

/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for a given range
 * of an inode. The buffer is returned locked. No I/O is implied by this
 * call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	size_t			numbytes;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
	xfs_daddr_t		blkno = map[0].bm_bn;
	xfs_daddr_t		eofs;
	int			numblks = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;
	numbytes = BBTOB(numblks);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(numbytes < (1 << btp->bt_sshift)));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (blkno >= eofs) {
		/*
		 * XXX (dgc): we should really be returning EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, blkno, eofs);
		WARN_ON(1);
		return NULL;
	}

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, blkno));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block number match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf	*bp;
	struct xfs_buf	*new_bp;
	int		error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}

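/*
 * Start a read of the buffer. Synchronous reads wait for I/O completion
 * and return its status; asynchronous reads return immediately.
 */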
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	xfs_buf_iorequest(bp);
	if (flags & XBF_ASYNC)
		return 0;
	return xfs_buf_iowait(bp);
}

xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = daddr;
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
		xfs_buf_relse(bp);
		return NULL;
	}
	xfs_buf_iorequest(bp);
	xfs_buf_iowait(bp);
	return bp;
}

/*
 * Return a buffer allocated as an empty buffer and associated with external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

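/* Translate a kernel virtual address (lowmem or vmalloc) to its page. */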
static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

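/*
 * Associate externally supplied memory with a buffer: build a page list
 * covering the memory region and set the buffer lengths accordingly. No
 * pages are allocated or copied here.
 */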
int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}

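/*
 * Allocate an uncached buffer of numblks basic blocks. The buffer is not
 * inserted into the cache rbtree; the caller sets the block number before
 * issuing I/O.
 */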
xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	bp = _xfs_buf_alloc(target, &map, 1, 0);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Releases a hold on the specified buffer. If the hold count is 1,
 * calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		spin_lock(&bp->b_lock);
		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
			/*
			 * If the buffer is added to the LRU take a new
			 * reference to the buffer for the LRU and clear the
			 * (now stale) dispose list state flag
			 */
			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
				bp->b_state &= ~XFS_BSTATE_DISPOSE;
				atomic_inc(&bp->b_hold);
			}
			spin_unlock(&bp->b_lock);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			/*
			 * most of the time buffers will already be removed from
			 * the LRU, so optimise that case by checking for the
			 * XFS_BSTATE_DISPOSE flag indicating the last list the
			 * buffer was on was the disposal list
			 */
			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
			} else {
				ASSERT(list_empty(&bp->b_lru));
			}
			spin_unlock(&bp->b_lock);

			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}


/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

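/* Drop the buffer lock taken by xfs_buf_lock() or xfs_buf_trylock(). */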
void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

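/* Sleep until the buffer's pin count drops to zero. */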
STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

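/*
 * Deferred completion handler: run the read verifier where appropriate,
 * then hand the buffer to its b_iodone callback, release it for async I/O,
 * or wake the synchronous waiter.
 */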
STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);
	bool			read = !!(bp->b_flags & XBF_READ);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/* only validate buffers that were read without errors */
	if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
		bp->b_ops->verify_read(bp);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else {
		ASSERT(read && bp->b_ops);
		complete(&bp->b_iowait);
	}
}

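/*
 * I/O completion entry point. Buffers needing callback or verifier work
 * are processed by xfs_buf_iodone_work, either directly or deferred to the
 * xfslogd workqueue when schedule is set; otherwise the waiter is woken here.
 */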
void
xfs_buf_ioend(
	struct xfs_buf	*bp,
	int		schedule)
{
	bool		read = !!(bp->b_flags & XBF_READ);

	trace_xfs_buf_iodone(bp, _RET_IP_);

	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
		complete(&bp->b_iowait);
	}
}

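/* Record an I/O error on the buffer for completion handlers to observe. */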
void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDONE(bp);
	xfs_buf_stale(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_DONE(bp);
	xfs_buf_stale(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}

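/*
 * Submit I/O on a buffer, turning it into an error completion instead if
 * the filesystem has already been forcibly shut down.
 */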
STATIC int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

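/*
 * Write a buffer synchronously and wait for completion, forcing a
 * filesystem shutdown if the write fails.
 */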
int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);

	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

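/* Finish buffer completion once the count of outstanding bios drops to zero. */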
STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

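/*
 * bio completion handler: record the first error seen, invalidate vmap
 * aliases for mapped read buffers, and drop this bio's I/O reference.
 */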
STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (!bp->b_error)
		xfs_buf_ioerror(bp, -error);

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

Dave Chinner3e85c862012-06-22 18:50:09 +10001201static void
1202xfs_buf_ioapply_map(
1203 struct xfs_buf *bp,
1204 int map,
1205 int *buf_offset,
1206 int *count,
1207 int rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208{
Dave Chinner3e85c862012-06-22 18:50:09 +10001209 int page_index;
1210 int total_nr_pages = bp->b_page_count;
1211 int nr_pages;
1212 struct bio *bio;
1213 sector_t sector = bp->b_maps[map].bm_bn;
1214 int size;
1215 int offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216
Nathan Scottce8e9222006-01-11 15:39:08 +11001217 total_nr_pages = bp->b_page_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
Dave Chinner3e85c862012-06-22 18:50:09 +10001219 /* skip the pages in the buffer before the start offset */
1220 page_index = 0;
1221 offset = *buf_offset;
1222 while (offset >= PAGE_SIZE) {
1223 page_index++;
1224 offset -= PAGE_SIZE;
Christoph Hellwigf538d4d2005-11-02 10:26:59 +11001225 }
1226
Dave Chinner3e85c862012-06-22 18:50:09 +10001227 /*
1228 * Limit the IO size to the length of the current vector, and update the
1229 * remaining IO count for the next time around.
1230 */
1231 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1232 *count -= size;
1233 *buf_offset += size;
Christoph Hellwig34951f52011-07-26 15:06:44 +00001234
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235next_chunk:
Nathan Scottce8e9222006-01-11 15:39:08 +11001236 atomic_inc(&bp->b_io_remaining);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1238 if (nr_pages > total_nr_pages)
1239 nr_pages = total_nr_pages;
1240
1241 bio = bio_alloc(GFP_NOIO, nr_pages);
Nathan Scottce8e9222006-01-11 15:39:08 +11001242 bio->bi_bdev = bp->b_target->bt_bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 bio->bi_sector = sector;
Nathan Scottce8e9222006-01-11 15:39:08 +11001244 bio->bi_end_io = xfs_buf_bio_end_io;
1245 bio->bi_private = bp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246
Dave Chinner3e85c862012-06-22 18:50:09 +10001248 for (; size && nr_pages; nr_pages--, page_index++) {
Dave Chinner0e6e8472011-03-26 09:16:45 +11001249 int rbytes, nbytes = PAGE_SIZE - offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250
1251 if (nbytes > size)
1252 nbytes = size;
1253
Dave Chinner3e85c862012-06-22 18:50:09 +10001254 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1255 offset);
Nathan Scottce8e9222006-01-11 15:39:08 +11001256 if (rbytes < nbytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 break;
1258
1259 offset = 0;
Dave Chinneraa0e8832012-04-23 15:58:52 +10001260 sector += BTOBB(nbytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 size -= nbytes;
1262 total_nr_pages--;
1263 }
1264
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 if (likely(bio->bi_size)) {
James Bottomley73c77e22010-01-25 11:42:24 -06001266 if (xfs_buf_is_vmapped(bp)) {
1267 flush_kernel_vmap_range(bp->b_addr,
1268 xfs_buf_vmap_len(bp));
1269 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 submit_bio(rw, bio);
1271 if (size)
1272 goto next_chunk;
1273 } else {
Dave Chinner37eb17e2012-11-12 22:09:46 +11001274 /*
1275 * This is guaranteed not to be the last io reference count
1276 * because the caller (xfs_buf_iorequest) holds a count itself.
1277 */
1278 atomic_dec(&bp->b_io_remaining);
Nathan Scottce8e9222006-01-11 15:39:08 +11001279 xfs_buf_ioerror(bp, EIO);
Dave Chinnerec53d1d2010-07-20 17:52:59 +10001280 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 }
Dave Chinner3e85c862012-06-22 18:50:09 +10001282
1283}
1284
1285STATIC void
1286_xfs_buf_ioapply(
1287 struct xfs_buf *bp)
1288{
1289 struct blk_plug plug;
1290 int rw;
1291 int offset;
1292 int size;
1293 int i;
1294
Dave Chinnerc163f9a2013-03-12 23:30:34 +11001295 /*
1296 * Make sure we capture only current IO errors rather than stale errors
1297 * left over from previous use of the buffer (e.g. failed readahead).
1298 */
1299 bp->b_error = 0;
1300
Dave Chinner3e85c862012-06-22 18:50:09 +10001301 if (bp->b_flags & XBF_WRITE) {
1302 if (bp->b_flags & XBF_SYNCIO)
1303 rw = WRITE_SYNC;
1304 else
1305 rw = WRITE;
1306 if (bp->b_flags & XBF_FUA)
1307 rw |= REQ_FUA;
1308 if (bp->b_flags & XBF_FLUSH)
1309 rw |= REQ_FLUSH;
Dave Chinner1813dd62012-11-14 17:54:40 +11001310
1311 /*
1312 * Run the write verifier callback function if it exists. If
1313 * this function fails it will mark the buffer with an error and
1314 * the IO should not be dispatched.
1315 */
1316 if (bp->b_ops) {
1317 bp->b_ops->verify_write(bp);
1318 if (bp->b_error) {
1319 xfs_force_shutdown(bp->b_target->bt_mount,
1320 SHUTDOWN_CORRUPT_INCORE);
1321 return;
1322 }
1323 }
Dave Chinner3e85c862012-06-22 18:50:09 +10001324 } else if (bp->b_flags & XBF_READ_AHEAD) {
1325 rw = READA;
1326 } else {
1327 rw = READ;
1328 }
1329
1330 /* we only use the buffer cache for meta-data */
1331 rw |= REQ_META;
1332
1333 /*
1334 * Walk all the vectors issuing IO on them. Set up the initial offset
1335 * into the buffer and the desired IO size before we start -
1336 * xfs_buf_ioapply_map() will modify them appropriately for each
1337 * subsequent call.
1338 */
1339 offset = bp->b_offset;
1340 size = BBTOB(bp->b_io_length);
1341 blk_start_plug(&plug);
1342 for (i = 0; i < bp->b_map_count; i++) {
1343 xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1344 if (bp->b_error)
1345 break;
1346 if (size <= 0)
1347 break; /* all done */
1348 }
1349 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350}
1351
Dave Chinner0e95f192012-04-23 15:58:46 +10001352void
Nathan Scottce8e9222006-01-11 15:39:08 +11001353xfs_buf_iorequest(
1354 xfs_buf_t *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355{
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001356 trace_xfs_buf_iorequest(bp, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001358 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
Christoph Hellwig375ec692011-08-23 08:28:03 +00001360 if (bp->b_flags & XBF_WRITE)
Nathan Scottce8e9222006-01-11 15:39:08 +11001361 xfs_buf_wait_unpin(bp);
Nathan Scottce8e9222006-01-11 15:39:08 +11001362 xfs_buf_hold(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363
1364 /* Set the count to 1 initially; this will stop an I/O
1365 * completion callout that happens before we have started
Nathan Scottce8e9222006-01-11 15:39:08 +11001366 * all the I/O from calling xfs_buf_ioend too early.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 */
Nathan Scottce8e9222006-01-11 15:39:08 +11001368 atomic_set(&bp->b_io_remaining, 1);
1369 _xfs_buf_ioapply(bp);
Christoph Hellwig08023d62012-07-02 06:00:04 -04001370 _xfs_buf_ioend(bp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
Nathan Scottce8e9222006-01-11 15:39:08 +11001372 xfs_buf_rele(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373}
1374
1375/*
Dave Chinner0e95f192012-04-23 15:58:46 +10001376 * Waits for I/O to complete on the buffer supplied. It returns immediately if
1377 * no I/O is pending or there is already a pending error on the buffer. It
1378 * returns the I/O error code, if any, or 0 if there was no error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 */
1380int
Nathan Scottce8e9222006-01-11 15:39:08 +11001381xfs_buf_iowait(
1382 xfs_buf_t *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383{
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001384 trace_xfs_buf_iowait(bp, _RET_IP_);
1385
Dave Chinner0e95f192012-04-23 15:58:46 +10001386 if (!bp->b_error)
1387 wait_for_completion(&bp->b_iowait);
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001388
1389 trace_xfs_buf_iowait_done(bp, _RET_IP_);
Nathan Scottce8e9222006-01-11 15:39:08 +11001390 return bp->b_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391}
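
/*
 * Editorial sketch (not part of the original file): xfs_buf_iorequest() and
 * xfs_buf_iowait() can be used as an asynchronous submit/wait pair. The
 * fragment below is a hypothetical caller-side illustration only; "bp" is
 * assumed to be a locked buffer already set up for the desired transfer.
 */
#if 0	/* illustration only */
	int		error;

	bp->b_flags |= XBF_READ;
	xfs_buf_iorequest(bp);		/* submit; completion runs xfs_buf_ioend() */
	/* ... overlap other work with the I/O ... */
	error = xfs_buf_iowait(bp);	/* 0 on success, otherwise bp->b_error */
#endif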
1392
Nathan Scottce8e9222006-01-11 15:39:08 +11001393xfs_caddr_t
1394xfs_buf_offset(
1395 xfs_buf_t *bp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 size_t offset)
1397{
1398 struct page *page;
1399
Dave Chinner611c9942012-04-23 15:59:07 +10001400 if (bp->b_addr)
Chandra Seetharaman62926042011-07-22 23:40:15 +00001401 return bp->b_addr + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402
Nathan Scottce8e9222006-01-11 15:39:08 +11001403 offset += bp->b_offset;
Dave Chinner0e6e8472011-03-26 09:16:45 +11001404 page = bp->b_pages[offset >> PAGE_SHIFT];
1405 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406}
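
/*
 * Editorial sketch (not part of the original file): xfs_buf_offset() hides
 * whether the buffer is backed by a single vmapped region or by discrete
 * pages. A hypothetical caller could address data at a known byte offset
 * like this (the offset value is an illustrative assumption):
 */
#if 0	/* illustration only */
	xfs_caddr_t	ptr = xfs_buf_offset(bp, 256);
	/* ptr now points at byte 256 of the buffer's data */
#endif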
1407
1408/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 * Move data into or out of a buffer.
1410 */
1411void
Nathan Scottce8e9222006-01-11 15:39:08 +11001412xfs_buf_iomove(
1413 xfs_buf_t *bp, /* buffer to process */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 size_t boff, /* starting buffer offset */
1415 size_t bsize, /* length to copy */
Dave Chinnerb9c48642010-01-20 10:47:39 +11001416 void *data, /* data address */
Nathan Scottce8e9222006-01-11 15:39:08 +11001417 xfs_buf_rw_t mode) /* read/write/zero flag */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418{
Dave Chinner795cac72012-04-23 15:58:53 +10001419 size_t bend;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420
1421 bend = boff + bsize;
1422 while (boff < bend) {
Dave Chinner795cac72012-04-23 15:58:53 +10001423 struct page *page;
1424 int page_index, page_offset, csize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425
Dave Chinner795cac72012-04-23 15:58:53 +10001426 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1427 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1428 page = bp->b_pages[page_index];
1429 csize = min_t(size_t, PAGE_SIZE - page_offset,
1430 BBTOB(bp->b_io_length) - boff);
1431
1432 ASSERT((csize + page_offset) <= PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433
1434 switch (mode) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001435 case XBRW_ZERO:
Dave Chinner795cac72012-04-23 15:58:53 +10001436 memset(page_address(page) + page_offset, 0, csize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 break;
Nathan Scottce8e9222006-01-11 15:39:08 +11001438 case XBRW_READ:
Dave Chinner795cac72012-04-23 15:58:53 +10001439 memcpy(data, page_address(page) + page_offset, csize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 break;
Nathan Scottce8e9222006-01-11 15:39:08 +11001441 case XBRW_WRITE:
Dave Chinner795cac72012-04-23 15:58:53 +10001442 memcpy(page_address(page) + page_offset, data, csize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 }
1444
1445 boff += csize;
1446 data += csize;
1447 }
1448}
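
/*
 * Editorial sketch (not part of the original file): examples of the three
 * xfs_buf_iomove() modes. The offsets, sizes and the "payload" variable are
 * illustrative assumptions; XBRW_ZERO ignores the data pointer entirely.
 */
#if 0	/* illustration only */
	xfs_buf_iomove(bp, 0, 512, NULL, XBRW_ZERO);			/* zero first 512 bytes */
	xfs_buf_iomove(bp, 512, sizeof(payload), &payload, XBRW_WRITE);	/* copy data in */
	xfs_buf_iomove(bp, 512, sizeof(payload), &payload, XBRW_READ);		/* copy data out */
#endif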
1449
1450/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001451 * Handling of buffer targets (buftargs).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 */
1453
1454/*
Dave Chinner430cbeb2010-12-02 16:30:55 +11001455 * Wait for any bufs with callbacks that have been submitted but have not yet
1456 * returned. These buffers will have an elevated hold count, so wait on those
1457 * while freeing all the buffers only held by the LRU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 */
Dave Chinnere80dfa12013-08-28 10:18:05 +10001459static enum lru_status
1460xfs_buftarg_wait_rele(
1461 struct list_head *item,
1462 spinlock_t *lru_lock,
1463 void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464
Dave Chinnere80dfa12013-08-28 10:18:05 +10001465{
1466 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
Dave Chinnera4082352013-08-28 10:18:06 +10001467 struct list_head *dispose = arg;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001468
1469 if (atomic_read(&bp->b_hold) > 1) {
Dave Chinnera4082352013-08-28 10:18:06 +10001470 /* need to wait, so skip it this pass */
Dave Chinnere80dfa12013-08-28 10:18:05 +10001471 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
Dave Chinnera4082352013-08-28 10:18:06 +10001472 return LRU_SKIP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 }
Dave Chinnera4082352013-08-28 10:18:06 +10001474 if (!spin_trylock(&bp->b_lock))
1475 return LRU_SKIP;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001476
Dave Chinnera4082352013-08-28 10:18:06 +10001477 /*
1478 * clear the LRU reference count so the buffer doesn't get
1479 * ignored in xfs_buf_rele().
1480 */
1481 atomic_set(&bp->b_lru_ref, 0);
1482 bp->b_state |= XFS_BSTATE_DISPOSE;
1483 list_move(item, dispose);
1484 spin_unlock(&bp->b_lock);
1485 return LRU_REMOVED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486}
1487
Dave Chinnere80dfa12013-08-28 10:18:05 +10001488void
1489xfs_wait_buftarg(
1490 struct xfs_buftarg *btp)
1491{
Dave Chinnera4082352013-08-28 10:18:06 +10001492 LIST_HEAD(dispose);
1493 int loop = 0;
1494
1495 /* loop until there is nothing left on the lru list. */
1496 while (list_lru_count(&btp->bt_lru)) {
Dave Chinnere80dfa12013-08-28 10:18:05 +10001497 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
Dave Chinnera4082352013-08-28 10:18:06 +10001498 &dispose, LONG_MAX);
1499
1500 while (!list_empty(&dispose)) {
1501 struct xfs_buf *bp;
1502 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1503 list_del_init(&bp->b_lru);
1504 xfs_buf_rele(bp);
1505 }
1506 if (loop++ != 0)
1507 delay(100);
1508 }
Dave Chinnere80dfa12013-08-28 10:18:05 +10001509}
1510
1511static enum lru_status
1512xfs_buftarg_isolate(
1513 struct list_head *item,
1514 spinlock_t *lru_lock,
1515 void *arg)
1516{
1517 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1518 struct list_head *dispose = arg;
1519
1520 /*
Dave Chinnera4082352013-08-28 10:18:06 +10001521 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1522 * If we fail to get the lock, just skip it.
1523 */
1524 if (!spin_trylock(&bp->b_lock))
1525 return LRU_SKIP;
1526 /*
Dave Chinnere80dfa12013-08-28 10:18:05 +10001527 * Decrement the b_lru_ref count unless the value is already
1528 * zero. If the value is already zero, we need to reclaim the
1529 * buffer, otherwise it gets another trip through the LRU.
1530 */
Dave Chinnera4082352013-08-28 10:18:06 +10001531 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1532 spin_unlock(&bp->b_lock);
Dave Chinnere80dfa12013-08-28 10:18:05 +10001533 return LRU_ROTATE;
Dave Chinnera4082352013-08-28 10:18:06 +10001534 }
Dave Chinnere80dfa12013-08-28 10:18:05 +10001535
Dave Chinnera4082352013-08-28 10:18:06 +10001536 bp->b_state |= XFS_BSTATE_DISPOSE;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001537 list_move(item, dispose);
Dave Chinnera4082352013-08-28 10:18:06 +10001538 spin_unlock(&bp->b_lock);
Dave Chinnere80dfa12013-08-28 10:18:05 +10001539 return LRU_REMOVED;
1540}
1541
Andrew Mortonaddbda42013-08-28 10:18:06 +10001542static unsigned long
Dave Chinnere80dfa12013-08-28 10:18:05 +10001543xfs_buftarg_shrink_scan(
Dave Chinnerff57ab22010-11-30 17:27:57 +11001544 struct shrinker *shrink,
Ying Han1495f232011-05-24 17:12:27 -07001545 struct shrink_control *sc)
David Chinnera6867a62006-01-11 15:37:58 +11001546{
Dave Chinnerff57ab22010-11-30 17:27:57 +11001547 struct xfs_buftarg *btp = container_of(shrink,
1548 struct xfs_buftarg, bt_shrinker);
Dave Chinner430cbeb2010-12-02 16:30:55 +11001549 LIST_HEAD(dispose);
Andrew Mortonaddbda42013-08-28 10:18:06 +10001550 unsigned long freed;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001551 unsigned long nr_to_scan = sc->nr_to_scan;
Dave Chinner430cbeb2010-12-02 16:30:55 +11001552
Dave Chinnere80dfa12013-08-28 10:18:05 +10001553 freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
1554 &dispose, &nr_to_scan);
Dave Chinner430cbeb2010-12-02 16:30:55 +11001555
1556 while (!list_empty(&dispose)) {
Dave Chinnere80dfa12013-08-28 10:18:05 +10001557 struct xfs_buf *bp;
Dave Chinner430cbeb2010-12-02 16:30:55 +11001558 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1559 list_del_init(&bp->b_lru);
1560 xfs_buf_rele(bp);
1561 }
1562
Dave Chinnere80dfa12013-08-28 10:18:05 +10001563 return freed;
1564}
1565
Andrew Mortonaddbda42013-08-28 10:18:06 +10001566static unsigned long
Dave Chinnere80dfa12013-08-28 10:18:05 +10001567xfs_buftarg_shrink_count(
1568 struct shrinker *shrink,
1569 struct shrink_control *sc)
1570{
1571 struct xfs_buftarg *btp = container_of(shrink,
1572 struct xfs_buftarg, bt_shrinker);
1573 return list_lru_count_node(&btp->bt_lru, sc->nid);
David Chinnera6867a62006-01-11 15:37:58 +11001574}
1575
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576void
1577xfs_free_buftarg(
Christoph Hellwigb7963132009-03-03 14:48:37 -05001578 struct xfs_mount *mp,
1579 struct xfs_buftarg *btp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580{
Dave Chinnerff57ab22010-11-30 17:27:57 +11001581 unregister_shrinker(&btp->bt_shrinker);
Glauber Costaf5e1dd32013-08-28 10:18:18 +10001582 list_lru_destroy(&btp->bt_lru);
Dave Chinnerff57ab22010-11-30 17:27:57 +11001583
Christoph Hellwigb7963132009-03-03 14:48:37 -05001584 if (mp->m_flags & XFS_MOUNT_BARRIER)
1585 xfs_blkdev_issue_flush(btp);
David Chinnera6867a62006-01-11 15:37:58 +11001586
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10001587 kmem_free(btp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588}
1589
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590STATIC int
1591xfs_setsize_buftarg_flags(
1592 xfs_buftarg_t *btp,
1593 unsigned int blocksize,
1594 unsigned int sectorsize,
1595 int verbose)
1596{
Nathan Scottce8e9222006-01-11 15:39:08 +11001597 btp->bt_bsize = blocksize;
1598 btp->bt_sshift = ffs(sectorsize) - 1;
1599 btp->bt_smask = sectorsize - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600
Nathan Scottce8e9222006-01-11 15:39:08 +11001601 if (set_blocksize(btp->bt_bdev, sectorsize)) {
Christoph Hellwig02b102d2011-10-10 16:52:51 +00001602 char name[BDEVNAME_SIZE];
1603
1604 bdevname(btp->bt_bdev, name);
1605
Dave Chinner4f107002011-03-07 10:00:35 +11001606 xfs_warn(btp->bt_mount,
Eric Sandeen08e96e12013-10-11 20:59:05 -05001607 "Cannot set_blocksize to %u on device %s",
Christoph Hellwig02b102d2011-10-10 16:52:51 +00001608 sectorsize, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 return EINVAL;
1610 }
1611
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 return 0;
1613}
1614
1615/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001616 * When allocating the initial buffer target we have not yet
1617 * read in the superblock, so we don't know what size sectors
Zhi Yong Wu8b4ad792013-08-12 03:14:56 +00001618 * are being used at this early stage. Play safe.
Nathan Scottce8e9222006-01-11 15:39:08 +11001619 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620STATIC int
1621xfs_setsize_buftarg_early(
1622 xfs_buftarg_t *btp,
1623 struct block_device *bdev)
1624{
1625 return xfs_setsize_buftarg_flags(btp,
Dave Chinner0e6e8472011-03-26 09:16:45 +11001626 PAGE_SIZE, bdev_logical_block_size(bdev), 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627}
1628
1629int
1630xfs_setsize_buftarg(
1631 xfs_buftarg_t *btp,
1632 unsigned int blocksize,
1633 unsigned int sectorsize)
1634{
1635 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1636}
1637
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638xfs_buftarg_t *
1639xfs_alloc_buftarg(
Dave Chinnerebad8612010-09-22 10:47:20 +10001640 struct xfs_mount *mp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 struct block_device *bdev,
Jan Engelhardte2a07812010-03-23 09:52:55 +11001642 int external,
1643 const char *fsname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644{
1645 xfs_buftarg_t *btp;
1646
Dave Chinnerb17cb362013-05-20 09:51:12 +10001647 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
Dave Chinnerebad8612010-09-22 10:47:20 +10001649 btp->bt_mount = mp;
Nathan Scottce8e9222006-01-11 15:39:08 +11001650 btp->bt_dev = bdev->bd_dev;
1651 btp->bt_bdev = bdev;
Dave Chinner0e6e8472011-03-26 09:16:45 +11001652 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1653 if (!btp->bt_bdi)
1654 goto error;
1655
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 if (xfs_setsize_buftarg_early(btp, bdev))
1657 goto error;
Glauber Costa5ca302c2013-08-28 10:18:18 +10001658
1659 if (list_lru_init(&btp->bt_lru))
1660 goto error;
1661
Dave Chinnere80dfa12013-08-28 10:18:05 +10001662 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1663 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
Dave Chinnerff57ab22010-11-30 17:27:57 +11001664 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001665 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
Dave Chinnerff57ab22010-11-30 17:27:57 +11001666 register_shrinker(&btp->bt_shrinker);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 return btp;
1668
1669error:
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10001670 kmem_free(btp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 return NULL;
1672}
1673
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674/*
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001675 * Add a buffer to the delayed write list.
1676 *
1677 * This queues a buffer for writeout if it hasn't already been queued. Note
1678 * neither this routine nor the buffer list submission functions perform
1679 * any internal synchronization. It is expected that the lists are thread-local
1680 * to the callers.
1681 *
1682 * Returns true if we queued up the buffer, or false if it was already
1683 * on the buffer list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 */
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001685bool
Nathan Scottce8e9222006-01-11 15:39:08 +11001686xfs_buf_delwri_queue(
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001687 struct xfs_buf *bp,
1688 struct list_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689{
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001690 ASSERT(xfs_buf_islocked(bp));
1691 ASSERT(!(bp->b_flags & XBF_READ));
1692
1693 /*
1694 * If the buffer is already marked delwri it is already queued up
1695 * by someone else for immediate writeout. Just ignore it in that
1696 * case.
1697 */
1698 if (bp->b_flags & _XBF_DELWRI_Q) {
1699 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1700 return false;
1701 }
David Chinnera6867a62006-01-11 15:37:58 +11001702
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001703 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1704
Dave Chinnerd808f612010-02-02 10:13:42 +11001705 /*
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001706 * If a buffer gets written out synchronously or marked stale while it
1707 * is on a delwri list we lazily remove it. To do this, the other party
1708 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1709 * It remains referenced and on the list. In a rare corner case it
1710 * might get re-added to a delwri list after the synchronous writeout, in
1711 * which case we just need to re-add the flag here.
Dave Chinnerd808f612010-02-02 10:13:42 +11001712 */
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001713 bp->b_flags |= _XBF_DELWRI_Q;
1714 if (list_empty(&bp->b_list)) {
1715 atomic_inc(&bp->b_hold);
1716 list_add_tail(&bp->b_list, list);
David Chinner585e6d82007-02-10 18:32:29 +11001717 }
David Chinner585e6d82007-02-10 18:32:29 +11001718
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001719 return true;
David Chinner585e6d82007-02-10 18:32:29 +11001720}
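
/*
 * Editorial sketch (not part of the original file): the delwri interfaces
 * expect a thread-local list. A hypothetical caller queues locked buffers
 * onto its own list and submits them in one batch later; the list variable
 * and control flow are illustrative assumptions.
 */
#if 0	/* illustration only */
	LIST_HEAD	(buffer_list);
	int		error;

	/* for each buffer the caller holds locked with a reference: */
	xfs_buf_delwri_queue(bp, &buffer_list);	/* list takes its own hold */
	xfs_buf_relse(bp);			/* caller's lock/reference dropped */

	/* ... queue more buffers the same way ... */

	error = xfs_buf_delwri_submit(&buffer_list);	/* write all, wait for I/O */
#endif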
1721
Dave Chinner089716a2010-01-26 15:13:25 +11001722/*
1723 * Compare function is more complex than it needs to be because
1724 * the return value is only 32 bits and we are doing comparisons
1725 * on 64 bit values
1726 */
1727static int
1728xfs_buf_cmp(
1729 void *priv,
1730 struct list_head *a,
1731 struct list_head *b)
1732{
1733 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1734 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1735 xfs_daddr_t diff;
1736
Mark Tinguelyf4b42422012-12-04 17:18:02 -06001737 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
Dave Chinner089716a2010-01-26 15:13:25 +11001738 if (diff < 0)
1739 return -1;
1740 if (diff > 0)
1741 return 1;
1742 return 0;
1743}
1744
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001745static int
1746__xfs_buf_delwri_submit(
1747 struct list_head *buffer_list,
1748 struct list_head *io_list,
1749 bool wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750{
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001751 struct blk_plug plug;
1752 struct xfs_buf *bp, *n;
1753 int pinned = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001755 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1756 if (!wait) {
1757 if (xfs_buf_ispinned(bp)) {
1758 pinned++;
1759 continue;
1760 }
1761 if (!xfs_buf_trylock(bp))
1762 continue;
1763 } else {
1764 xfs_buf_lock(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001767 /*
1768 * Someone else might have written the buffer synchronously or
1769 * marked it stale in the meantime. In that case only the
1770 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1771 * reference and remove it from the list here.
1772 */
1773 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1774 list_del_init(&bp->b_list);
1775 xfs_buf_relse(bp);
1776 continue;
1777 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001779 list_move_tail(&bp->b_list, io_list);
1780 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001783 list_sort(NULL, io_list, xfs_buf_cmp);
Christoph Hellwiga1b7ea52011-03-30 11:05:09 +00001784
1785 blk_start_plug(&plug);
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001786 list_for_each_entry_safe(bp, n, io_list, b_list) {
1787 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
1788 bp->b_flags |= XBF_WRITE;
1789
1790 if (!wait) {
1791 bp->b_flags |= XBF_ASYNC;
1792 list_del_init(&bp->b_list);
Dave Chinner089716a2010-01-26 15:13:25 +11001793 }
Christoph Hellwig939d7232010-07-20 17:51:16 +10001794 xfs_bdstrat_cb(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 }
Christoph Hellwiga1b7ea52011-03-30 11:05:09 +00001796 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001798 return pinned;
1799}
Nathan Scottf07c2252006-09-28 10:52:15 +10001800
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001801/*
1802 * Write out a buffer list asynchronously.
1803 *
1804 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1805 * out and not wait for I/O completion on any of the buffers. This interface
1806 * is only safely usable for callers that can track I/O completion by higher
1807 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1808 * function.
1809 */
1810int
1811xfs_buf_delwri_submit_nowait(
1812 struct list_head *buffer_list)
1813{
1814 LIST_HEAD (io_list);
1815 return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1816}
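
/*
 * Editorial sketch (not part of the original file): a push-style caller can
 * keep a long-lived list, submit whatever is currently writable, and leave
 * pinned or locked buffers on the list for a later pass. The list variable
 * and retry policy are illustrative assumptions.
 */
#if 0	/* illustration only */
	int	pinned;

	pinned = xfs_buf_delwri_submit_nowait(&my_buf_list);
	if (pinned) {
		/* some buffers were pinned in memory; try again later */
	}
#endif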
1817
1818/*
1819 * Write out a buffer list synchronously.
1820 *
1821 * This will take the @buffer_list, write all buffers out and wait for I/O
1822 * completion on all of the buffers. @buffer_list is consumed by the function,
1823 * so callers must have some other way of tracking buffers if they require such
1824 * functionality.
1825 */
1826int
1827xfs_buf_delwri_submit(
1828 struct list_head *buffer_list)
1829{
1830 LIST_HEAD (io_list);
1831 int error = 0, error2;
1832 struct xfs_buf *bp;
1833
1834 __xfs_buf_delwri_submit(buffer_list, &io_list, true);
1835
1836 /* Wait for IO to complete. */
1837 while (!list_empty(&io_list)) {
1838 bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1839
1840 list_del_init(&bp->b_list);
1841 error2 = xfs_buf_iowait(bp);
1842 xfs_buf_relse(bp);
1843 if (!error)
1844 error = error2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 }
1846
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001847 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848}
1849
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001850int __init
Nathan Scottce8e9222006-01-11 15:39:08 +11001851xfs_buf_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852{
Nathan Scott87582802006-03-14 13:18:19 +11001853 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1854 KM_ZONE_HWALIGN, NULL);
Nathan Scottce8e9222006-01-11 15:39:08 +11001855 if (!xfs_buf_zone)
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001856 goto out;
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001857
Dave Chinner51749e42010-09-08 09:00:22 +00001858 xfslogd_workqueue = alloc_workqueue("xfslogd",
Tejun Heo6370a6a2010-10-11 15:12:27 +02001859 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001860 if (!xfslogd_workqueue)
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001861 goto out_free_buf_zone;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001863 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864
Christoph Hellwig23ea4032005-06-21 15:14:01 +10001865 out_free_buf_zone:
Nathan Scottce8e9222006-01-11 15:39:08 +11001866 kmem_zone_destroy(xfs_buf_zone);
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001867 out:
Nathan Scott87582802006-03-14 13:18:19 +11001868 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869}
1870
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871void
Nathan Scottce8e9222006-01-11 15:39:08 +11001872xfs_buf_terminate(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873{
Christoph Hellwig04d8b282005-11-02 10:15:05 +11001874 destroy_workqueue(xfslogd_workqueue);
Nathan Scottce8e9222006-01-11 15:39:08 +11001875 kmem_zone_destroy(xfs_buf_zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876}