/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"

static kmem_zone_t *xfs_buf_zone;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
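/*
 * Note on the xb_to_gfp() mapping above (descriptive, not normative):
 * read-ahead allocations use __GFP_NORETRY so they fail quickly under memory
 * pressure rather than blocking, everything else uses GFP_NOFS to keep memory
 * reclaim from recursing back into the filesystem, and __GFP_NOWARN suppresses
 * allocation failure warnings because both cases are handled by the callers.
 */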

static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is NULL if the buffer is not mapped, but single-page buffers
	 * are given a direct kernel mapping rather than being vmapped, so the
	 * check has to look at both b_addr and b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
 * this buffer. The count is incremented once per buffer (per hold cycle)
 * because the corresponding decrement is deferred to buffer release. Buffers
 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
 * tracking adds unnecessary overhead. This is used for synchronization
 * purposes with unmount (see xfs_wait_buftarg()), so all we really need is a
 * count of in-flight buffers.
 *
 * Buffers that are never released (e.g., superblock, iclog buffers) must set
 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
 * never reaches zero and unmount hangs indefinitely.
 */
static inline void
xfs_buf_ioacct_inc(
	struct xfs_buf	*bp)
{
	if (bp->b_flags & XBF_NO_IOACCT)
		return;

	ASSERT(bp->b_flags & XBF_ASYNC);
	spin_lock(&bp->b_lock);
	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
		percpu_counter_inc(&bp->b_target->bt_io_count);
	}
	spin_unlock(&bp->b_lock);
}

/*
 * Clear the in-flight state on a buffer about to be released to the LRU or
 * freed and unaccount from the buftarg.
 */
static inline void
__xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	lockdep_assert_held(&bp->b_lock);

	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
		percpu_counter_dec(&bp->b_target->bt_io_count);
	}
}

static inline void
xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);
	spin_unlock(&bp->b_lock);
}
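/*
 * Rough pairing of the accounting helpers above (a sketch of the expected
 * call sites, not an exhaustive list): the in-flight count is bumped once
 * when async I/O is submitted on a held buffer, and dropped again when the
 * buffer is released to the LRU, freed, or marked stale (see xfs_buf_rele()
 * and xfs_buf_stale() below).
 */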

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	/*
	 * Once the buffer is marked stale and unlocked, a subsequent lookup
	 * could reset b_flags. There is no guarantee that the buffer is
	 * unaccounted (released to LRU) before that occurs. Drop in-flight
	 * status now to preserve accounting consistency.
	 */
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);

	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}

/*
 * Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(target->bt_mount, xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = -ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
					current->comm, current->pid,
					__func__, gfp_mask);

			XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	bp->b_flags &= ~_XBF_PAGES;
	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned nofs_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are likely to be under
		 * GFP_NOFS context here. Hence we need to tell memory reclaim
		 * that we are in such a context via PF_MEMALLOC_NOFS to prevent
		 * memory reclaim re-entering the filesystem here and
		 * potentially deadlocking.
		 */
		nofs_flag = memalloc_nofs_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_nofs_restore(nofs_flag);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}

/*
 * Finding and Reading Buffers
 */
static int
_xfs_buf_obj_cmp(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const struct xfs_buf_map	*map = arg->key;
	const struct xfs_buf		*bp = obj;

	/*
	 * The key hashing in the lookup path depends on the key being the
	 * first element of the compare_arg, make sure to assert this.
	 */
	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);

	if (bp->b_bn != map->bm_bn)
		return 1;

	if (unlikely(bp->b_length != map->bm_len)) {
		/*
		 * found a block number match. If the range doesn't
		 * match, the only way this is allowed is if the buffer
		 * in the cache is stale and the transaction that made
		 * it stale has not yet committed. i.e. we are
		 * reallocating a busy extent. Skip this buffer and
		 * continue searching for an exact match.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		return 1;
	}
	return 0;
}

static const struct rhashtable_params xfs_buf_hash_params = {
	.min_size		= 32,	/* empty AGs have minimal footprint */
	.nelem_hint		= 16,
	.key_len		= sizeof(xfs_daddr_t),
	.key_offset		= offsetof(struct xfs_buf, b_bn),
	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= _xfs_buf_obj_cmp,
};

int
xfs_buf_hash_init(
	struct xfs_perag	*pag)
{
	spin_lock_init(&pag->pag_buf_lock);
	return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
}

void
xfs_buf_hash_destroy(
	struct xfs_perag	*pag)
{
	rhashtable_destroy(&pag->pag_buf_hash);
}
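/*
 * Cache organisation, in brief: each allocation group carries its own
 * pag_buf_hash rhashtable and pag_buf_lock (set up by xfs_buf_hash_init()
 * above, presumably as part of per-AG initialisation). The hash key is only
 * the block number (bm_bn); a block-number match with a mismatched length is
 * only expected for stale buffers (a busy extent being reallocated) and is
 * rejected by _xfs_buf_obj_cmp() so the lookup keeps searching for an exact
 * match.
 */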

/*
 * Look up, and create if absent, a lockable buffer for
 * a given range of an inode. The buffer is returned
 * locked. No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	struct xfs_perag	*pag;
	xfs_buf_t		*bp;
	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
	xfs_daddr_t		eofs;
	int			i;

	for (i = 0; i < nmaps; i++)
		cmap.bm_len += map[i].bm_len;

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
		/*
		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, cmap.bm_bn, eofs);
		WARN_ON(1);
		return NULL;
	}

	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));

	spin_lock(&pag->pag_buf_lock);
	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
				    xfs_buf_hash_params);
	if (bp) {
		atomic_inc(&bp->b_hold);
		goto found;
	}

	/* No match found */
	if (new_bp) {
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		rhashtable_insert_fast(&pag->pag_buf_hash,
				       &new_bp->b_rhash_head,
				       xfs_buf_hash_params);
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	/*
	 * Clear b_error if this is a lookup from a caller that doesn't expect
	 * valid data to be found in the buffer.
	 */
	if (!(flags & XBF_READ))
		xfs_buf_ioerror(bp, 0);

	XFS_STATS_INC(target->bt_mount, xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}
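/*
 * Illustrative call pattern (a sketch, assuming the usual wrappers declared
 * in xfs_buf.h): a caller working on one contiguous extent typically builds a
 * single-entry map and hands it to the _map variants, e.g.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	bp = xfs_buf_get_map(target, &map, 1, flags);
 *
 * Discontiguous (multi-extent) buffers pass an array of struct xfs_buf_map
 * with nmaps > 1 instead.
 */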

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	if (flags & XBF_ASYNC) {
		xfs_buf_submit(bp);
		return 0;
	}
	return xfs_buf_submit_wait(bp);
}

xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!(bp->b_flags & XBF_DONE)) {
			XFS_STATS_INC(target->bt_mount, xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdev->bd_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	*bpp = NULL;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return -ENOMEM;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;  /* always null for uncached buffers */
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfs_buf_submit_wait(bp);
	if (bp->b_error) {
		int	error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}
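/*
 * Uncached reads like the above are for one-off metadata that lives outside
 * the per-AG cache (mount-time probing and log recovery are typical users):
 * the block address goes in b_maps[0].bm_bn while b_bn stays
 * XFS_BUF_DADDR_NULL, the buffer is never inserted into a perag hash, and
 * xfs_buf_rele() therefore frees it directly on the last release.
 */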

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	/* flags might contain irrelevant bits, pass only what we care about */
	bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Release a hold on the specified buffer. If the hold count is 1, the buffer
 * is placed on the LRU or freed (depending on b_lru_ref).
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;
	bool			release;
	bool			freebuf = false;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		if (atomic_dec_and_test(&bp->b_hold)) {
			xfs_buf_ioacct_dec(bp);
			xfs_buf_free(bp);
		}
		return;
	}

	ASSERT(atomic_read(&bp->b_hold) > 0);

	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
	spin_lock(&bp->b_lock);
	if (!release) {
		/*
		 * Drop the in-flight state if the buffer is already on the LRU
		 * and it holds the only reference. This is racy because we
		 * haven't acquired the pag lock, but the use of
		 * XFS_BSTATE_IN_FLIGHT ensures the decrement occurs only once
		 * per-buf.
		 */
		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
			__xfs_buf_ioacct_dec(bp);
		goto out_unlock;
	}

	/* the last reference has been dropped ... */
	__xfs_buf_ioacct_dec(bp);
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		/*
		 * If the buffer is added to the LRU take a new reference to the
		 * buffer for the LRU and clear the (now stale) dispose list
		 * state flag
		 */
		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
			atomic_inc(&bp->b_hold);
		}
		spin_unlock(&pag->pag_buf_lock);
	} else {
		/*
		 * most of the time buffers will already be removed from the
		 * LRU, so optimise that case by checking for the
		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
		 * was on was the disposal list
		 */
		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
			list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
		} else {
			ASSERT(list_empty(&bp->b_lru));
		}

		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
		rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
				       xfs_buf_hash_params);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
		freebuf = true;
	}

out_unlock:
	spin_unlock(&bp->b_lock);

	if (freebuf)
		xfs_buf_free(bp);
}
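/*
 * Summary of the release path above: dropping the last hold either parks the
 * buffer on the buftarg LRU (when b_lru_ref is still set and the buffer is
 * not stale) or removes it from the perag hash and frees it; uncached
 * buffers (no b_pag) are always freed immediately on the last release.
 */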


/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
		trace_xfs_buf_trylock(bp, _RET_IP_);
	} else {
		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
	}
	return locked;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

void
xfs_buf_ioend(
	struct xfs_buf	*bp)
{
	bool		read = bp->b_flags & XBF_READ;

	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

	/* Only validate buffers that were read without errors */
	if (read && !bp->b_error && bp->b_ops) {
		ASSERT(!bp->b_iodone);
		bp->b_ops->verify_read(bp);
	}

	if (!bp->b_error)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else
		complete(&bp->b_iowait);
}

Dave Chinnere8aaba92014-10-02 09:04:22 +10001162static void
1163xfs_buf_ioend_work(
1164 struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165{
Dave Chinnere8aaba92014-10-02 09:04:22 +10001166 struct xfs_buf *bp =
Brian Fosterb29c70f2014-12-04 09:43:17 +11001167 container_of(work, xfs_buf_t, b_ioend_work);
Dave Chinner1813dd62012-11-14 17:54:40 +11001168
Dave Chinnere8aaba92014-10-02 09:04:22 +10001169 xfs_buf_ioend(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170}
1171
Alexander Kuleshov211fe1a2016-01-04 16:10:42 +11001172static void
Dave Chinnere8aaba92014-10-02 09:04:22 +10001173xfs_buf_ioend_async(
1174 struct xfs_buf *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175{
Brian Fosterb29c70f2014-12-04 09:43:17 +11001176 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1177 queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178}
1179
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180void
Nathan Scottce8e9222006-01-11 15:39:08 +11001181xfs_buf_ioerror(
1182 xfs_buf_t *bp,
1183 int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184{
Dave Chinner24513372014-06-25 14:58:08 +10001185 ASSERT(error <= 0 && error >= -1000);
1186 bp->b_error = error;
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001187 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188}
1189
Christoph Hellwig901796a2011-10-10 16:52:49 +00001190void
1191xfs_buf_ioerror_alert(
1192 struct xfs_buf *bp,
1193 const char *func)
1194{
1195 xfs_alert(bp->b_target->bt_mount,
Dave Chinneraa0e8832012-04-23 15:58:52 +10001196"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001197 (uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
Christoph Hellwig901796a2011-10-10 16:52:49 +00001198}
1199
Christoph Hellwiga2dcf5d2012-07-13 02:24:10 -04001200int
1201xfs_bwrite(
1202 struct xfs_buf *bp)
1203{
1204 int error;
1205
1206 ASSERT(xfs_buf_islocked(bp));
1207
1208 bp->b_flags |= XBF_WRITE;
Dave Chinner27187752014-10-02 09:04:56 +10001209 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1210 XBF_WRITE_FAIL | XBF_DONE);
Christoph Hellwiga2dcf5d2012-07-13 02:24:10 -04001211
Dave Chinner595bff72014-10-02 09:05:14 +10001212 error = xfs_buf_submit_wait(bp);
Christoph Hellwiga2dcf5d2012-07-13 02:24:10 -04001213 if (error) {
1214 xfs_force_shutdown(bp->b_target->bt_mount,
1215 SHUTDOWN_META_IO_ERROR);
1216 }
1217 return error;
1218}
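
/*
 * Usage sketch: one way a caller might drive xfs_bwrite().  It assumes the
 * buffer was obtained and locked elsewhere; xfs_bwrite() leaves the lock and
 * the caller's reference intact, so both are released here.  The helper name
 * is illustrative only.
 */
static int
xfs_example_write_buf(
	struct xfs_buf		*bp)	/* locked, referenced buffer */
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	error = xfs_bwrite(bp);		/* synchronous write, returns b_error */
	xfs_buf_relse(bp);		/* unlock and drop our reference */
	return error;
}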
1219
Brian Foster9bdd9bd2016-05-18 10:56:41 +10001220static void
Nathan Scottce8e9222006-01-11 15:39:08 +11001221xfs_buf_bio_end_io(
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001222 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223{
Brian Foster9bdd9bd2016-05-18 10:56:41 +10001224 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225
Dave Chinner37eb17e2012-11-12 22:09:46 +11001226 /*
1227 * don't overwrite existing errors - otherwise we can lose errors on
1228 * buffers that require multiple bios to complete.
1229 */
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001230 if (bio->bi_status) {
1231 int error = blk_status_to_errno(bio->bi_status);
1232
1233 cmpxchg(&bp->b_io_error, 0, error);
1234 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235
Dave Chinner37eb17e2012-11-12 22:09:46 +11001236 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
James Bottomley73c77e22010-01-25 11:42:24 -06001237 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1238
Dave Chinnere8aaba92014-10-02 09:04:22 +10001239 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1240 xfs_buf_ioend_async(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242}
1243
Dave Chinner3e85c862012-06-22 18:50:09 +10001244static void
1245xfs_buf_ioapply_map(
1246 struct xfs_buf *bp,
1247 int map,
1248 int *buf_offset,
1249 int *count,
Mike Christie50bfcd02016-06-05 14:31:57 -05001250 int op,
1251 int op_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252{
Dave Chinner3e85c862012-06-22 18:50:09 +10001253 int page_index;
1254 int total_nr_pages = bp->b_page_count;
1255 int nr_pages;
1256 struct bio *bio;
1257 sector_t sector = bp->b_maps[map].bm_bn;
1258 int size;
1259 int offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Nathan Scottce8e9222006-01-11 15:39:08 +11001261 total_nr_pages = bp->b_page_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262
Dave Chinner3e85c862012-06-22 18:50:09 +10001263 /* skip the pages in the buffer before the start offset */
1264 page_index = 0;
1265 offset = *buf_offset;
1266 while (offset >= PAGE_SIZE) {
1267 page_index++;
1268 offset -= PAGE_SIZE;
Christoph Hellwigf538d4d2005-11-02 10:26:59 +11001269 }
1270
Dave Chinner3e85c862012-06-22 18:50:09 +10001271 /*
1272 * Limit the IO size to the length of the current vector, and update the
1273 * remaining IO count for the next time around.
1274 */
1275 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1276 *count -= size;
1277 *buf_offset += size;
Christoph Hellwig34951f52011-07-26 15:06:44 +00001278
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279next_chunk:
Nathan Scottce8e9222006-01-11 15:39:08 +11001280 atomic_inc(&bp->b_io_remaining);
Ming Leic908e382016-05-30 21:34:33 +08001281 nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
1283 bio = bio_alloc(GFP_NOIO, nr_pages);
Nathan Scottce8e9222006-01-11 15:39:08 +11001284 bio->bi_bdev = bp->b_target->bt_bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001285 bio->bi_iter.bi_sector = sector;
Nathan Scottce8e9222006-01-11 15:39:08 +11001286 bio->bi_end_io = xfs_buf_bio_end_io;
1287 bio->bi_private = bp;
Mike Christie50bfcd02016-06-05 14:31:57 -05001288 bio_set_op_attrs(bio, op, op_flags);
Dave Chinner0e6e8472011-03-26 09:16:45 +11001289
Dave Chinner3e85c862012-06-22 18:50:09 +10001290 for (; size && nr_pages; nr_pages--, page_index++) {
Dave Chinner0e6e8472011-03-26 09:16:45 +11001291 int rbytes, nbytes = PAGE_SIZE - offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292
1293 if (nbytes > size)
1294 nbytes = size;
1295
Dave Chinner3e85c862012-06-22 18:50:09 +10001296 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1297 offset);
Nathan Scottce8e9222006-01-11 15:39:08 +11001298 if (rbytes < nbytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 break;
1300
1301 offset = 0;
Dave Chinneraa0e8832012-04-23 15:58:52 +10001302 sector += BTOBB(nbytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 size -= nbytes;
1304 total_nr_pages--;
1305 }
1306
Kent Overstreet4f024f32013-10-11 15:44:27 -07001307 if (likely(bio->bi_iter.bi_size)) {
James Bottomley73c77e22010-01-25 11:42:24 -06001308 if (xfs_buf_is_vmapped(bp)) {
1309 flush_kernel_vmap_range(bp->b_addr,
1310 xfs_buf_vmap_len(bp));
1311 }
Mike Christie4e49ea42016-06-05 14:31:41 -05001312 submit_bio(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 if (size)
1314 goto next_chunk;
1315 } else {
Dave Chinner37eb17e2012-11-12 22:09:46 +11001316 /*
1317 * This is guaranteed not to be the last io reference count
Dave Chinner595bff72014-10-02 09:05:14 +10001318 * because the caller (xfs_buf_submit) holds a count itself.
Dave Chinner37eb17e2012-11-12 22:09:46 +11001319 */
1320 atomic_dec(&bp->b_io_remaining);
Dave Chinner24513372014-06-25 14:58:08 +10001321 xfs_buf_ioerror(bp, -EIO);
Dave Chinnerec53d1d2010-07-20 17:52:59 +10001322 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 }
Dave Chinner3e85c862012-06-22 18:50:09 +10001324
1325}
1326
1327STATIC void
1328_xfs_buf_ioapply(
1329 struct xfs_buf *bp)
1330{
1331 struct blk_plug plug;
Mike Christie50bfcd02016-06-05 14:31:57 -05001332 int op;
1333 int op_flags = 0;
Dave Chinner3e85c862012-06-22 18:50:09 +10001334 int offset;
1335 int size;
1336 int i;
1337
Dave Chinnerc163f9a2013-03-12 23:30:34 +11001338 /*
1339 * Make sure we capture only current IO errors rather than stale errors
1340 * left over from previous use of the buffer (e.g. failed readahead).
1341 */
1342 bp->b_error = 0;
1343
Brian Fosterb29c70f2014-12-04 09:43:17 +11001344 /*
1345 * Initialize the I/O completion workqueue if we haven't yet or the
1346 * submitter has not opted to specify a custom one.
1347 */
1348 if (!bp->b_ioend_wq)
1349 bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
1350
Dave Chinner3e85c862012-06-22 18:50:09 +10001351 if (bp->b_flags & XBF_WRITE) {
Mike Christie50bfcd02016-06-05 14:31:57 -05001352 op = REQ_OP_WRITE;
Dave Chinner3e85c862012-06-22 18:50:09 +10001353 if (bp->b_flags & XBF_SYNCIO)
Christoph Hellwig70fd7612016-11-01 07:40:10 -06001354 op_flags = REQ_SYNC;
Dave Chinner3e85c862012-06-22 18:50:09 +10001355 if (bp->b_flags & XBF_FUA)
Mike Christie50bfcd02016-06-05 14:31:57 -05001356 op_flags |= REQ_FUA;
Dave Chinner3e85c862012-06-22 18:50:09 +10001357 if (bp->b_flags & XBF_FLUSH)
Mike Christie28a8f0d2016-06-05 14:32:25 -05001358 op_flags |= REQ_PREFLUSH;
Dave Chinner1813dd62012-11-14 17:54:40 +11001359
1360 /*
1361 * Run the write verifier callback function if it exists. If
1362 * this function fails it will mark the buffer with an error and
1363 * the IO should not be dispatched.
1364 */
1365 if (bp->b_ops) {
1366 bp->b_ops->verify_write(bp);
1367 if (bp->b_error) {
1368 xfs_force_shutdown(bp->b_target->bt_mount,
1369 SHUTDOWN_CORRUPT_INCORE);
1370 return;
1371 }
Dave Chinner400b9d82014-08-04 12:42:40 +10001372 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1373 struct xfs_mount *mp = bp->b_target->bt_mount;
1374
1375 /*
1376 * non-crc filesystems don't attach verifiers during
1377 * log recovery, so don't warn for such filesystems.
1378 */
1379 if (xfs_sb_version_hascrc(&mp->m_sb)) {
1380 xfs_warn(mp,
1381 "%s: no ops on block 0x%llx/0x%x",
1382 __func__, bp->b_bn, bp->b_length);
1383 xfs_hex_dump(bp->b_addr, 64);
1384 dump_stack();
1385 }
Dave Chinner1813dd62012-11-14 17:54:40 +11001386 }
Dave Chinner3e85c862012-06-22 18:50:09 +10001387 } else if (bp->b_flags & XBF_READ_AHEAD) {
Mike Christie50bfcd02016-06-05 14:31:57 -05001388 op = REQ_OP_READ;
1389 op_flags = REQ_RAHEAD;
Dave Chinner3e85c862012-06-22 18:50:09 +10001390 } else {
Mike Christie50bfcd02016-06-05 14:31:57 -05001391 op = REQ_OP_READ;
Dave Chinner3e85c862012-06-22 18:50:09 +10001392 }
1393
1394 /* we only use the buffer cache for meta-data */
Mike Christie50bfcd02016-06-05 14:31:57 -05001395 op_flags |= REQ_META;
Dave Chinner3e85c862012-06-22 18:50:09 +10001396
1397 /*
1398 * Walk all the vectors issuing IO on them. Set up the initial offset
1399 * into the buffer and the desired IO size before we start -
 1400	 * xfs_buf_ioapply_map() will modify them appropriately for each
1401 * subsequent call.
1402 */
1403 offset = bp->b_offset;
1404 size = BBTOB(bp->b_io_length);
1405 blk_start_plug(&plug);
1406 for (i = 0; i < bp->b_map_count; i++) {
Mike Christie50bfcd02016-06-05 14:31:57 -05001407 xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
Dave Chinner3e85c862012-06-22 18:50:09 +10001408 if (bp->b_error)
1409 break;
1410 if (size <= 0)
1411 break; /* all done */
1412 }
1413 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414}
1415
Dave Chinner595bff72014-10-02 09:05:14 +10001416/*
1417 * Asynchronous IO submission path. This transfers the buffer lock ownership and
1418 * the current reference to the IO. It is not safe to reference the buffer after
1419 * a call to this function unless the caller holds an additional reference
1420 * itself.
1421 */
Dave Chinner0e95f192012-04-23 15:58:46 +10001422void
Dave Chinner595bff72014-10-02 09:05:14 +10001423xfs_buf_submit(
1424 struct xfs_buf *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425{
Dave Chinner595bff72014-10-02 09:05:14 +10001426 trace_xfs_buf_submit(bp, _RET_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001428 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
Dave Chinner595bff72014-10-02 09:05:14 +10001429 ASSERT(bp->b_flags & XBF_ASYNC);
1430
1431 /* on shutdown we stale and complete the buffer immediately */
1432 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1433 xfs_buf_ioerror(bp, -EIO);
1434 bp->b_flags &= ~XBF_DONE;
1435 xfs_buf_stale(bp);
1436 xfs_buf_ioend(bp);
1437 return;
1438 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439
Christoph Hellwig375ec692011-08-23 08:28:03 +00001440 if (bp->b_flags & XBF_WRITE)
Nathan Scottce8e9222006-01-11 15:39:08 +11001441 xfs_buf_wait_unpin(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442
Dave Chinner61be9c52014-10-02 09:04:31 +10001443 /* clear the internal error state to avoid spurious errors */
1444 bp->b_io_error = 0;
1445
Eric Sandeen8d6c1212014-04-17 08:15:28 +10001446 /*
Dave Chinner595bff72014-10-02 09:05:14 +10001447 * The caller's reference is released during I/O completion.
1448 * This occurs some time after the last b_io_remaining reference is
 1449	 * released, so after we drop our IO reference we have to have some
1450 * other reference to ensure the buffer doesn't go away from underneath
1451 * us. Take a direct reference to ensure we have safe access to the
1452 * buffer until we are finished with it.
Dave Chinnere11bb802014-10-02 09:04:11 +10001453 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 xfs_buf_hold(bp);
1455
Eric Sandeen8d6c1212014-04-17 08:15:28 +10001456 /*
Dave Chinnere11bb802014-10-02 09:04:11 +10001457	 * Set the count to 1 initially; this stops an I/O completion
1458 * callout which happens before we have started all the I/O from calling
1459 * xfs_buf_ioend too early.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 */
Nathan Scottce8e9222006-01-11 15:39:08 +11001461 atomic_set(&bp->b_io_remaining, 1);
Brian Foster9c7504a2016-07-20 11:15:28 +10001462 xfs_buf_ioacct_inc(bp);
Nathan Scottce8e9222006-01-11 15:39:08 +11001463 _xfs_buf_ioapply(bp);
Dave Chinnere11bb802014-10-02 09:04:11 +10001464
Eric Sandeen8d6c1212014-04-17 08:15:28 +10001465 /*
Dave Chinner595bff72014-10-02 09:05:14 +10001466 * If _xfs_buf_ioapply failed, we can get back here with only the IO
1467 * reference we took above. If we drop it to zero, run completion so
1468 * that we don't return to the caller with completion still pending.
Eric Sandeen8d6c1212014-04-17 08:15:28 +10001469 */
Dave Chinnere8aaba92014-10-02 09:04:22 +10001470 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
Dave Chinner595bff72014-10-02 09:05:14 +10001471 if (bp->b_error)
Dave Chinnere8aaba92014-10-02 09:04:22 +10001472 xfs_buf_ioend(bp);
1473 else
1474 xfs_buf_ioend_async(bp);
1475 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476
Nathan Scottce8e9222006-01-11 15:39:08 +11001477 xfs_buf_rele(bp);
Dave Chinner595bff72014-10-02 09:05:14 +10001478 /* Note: it is not safe to reference bp now we've dropped our ref */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479}
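
/*
 * Usage sketch: a fire-and-forget asynchronous write through
 * xfs_buf_submit().  Assumes a locked, referenced buffer; both the lock and
 * the reference are handed over to the IO, so the buffer must not be touched
 * after submission.  The helper name is illustrative only.
 */
static void
xfs_example_write_async(
	struct xfs_buf		*bp)	/* locked, referenced buffer */
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	bp->b_flags &= ~(XBF_READ | XBF_WRITE_FAIL);
	bp->b_flags |= XBF_WRITE | XBF_ASYNC;
	xfs_buf_submit(bp);
	/* bp is no longer safe to reference here */
}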
1480
1481/*
Dave Chinner595bff72014-10-02 09:05:14 +10001482 * Synchronous buffer IO submission path, read or write.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 */
1484int
Dave Chinner595bff72014-10-02 09:05:14 +10001485xfs_buf_submit_wait(
1486 struct xfs_buf *bp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487{
Dave Chinner595bff72014-10-02 09:05:14 +10001488 int error;
1489
1490 trace_xfs_buf_submit_wait(bp, _RET_IP_);
1491
1492 ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
1493
1494 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1495 xfs_buf_ioerror(bp, -EIO);
1496 xfs_buf_stale(bp);
1497 bp->b_flags &= ~XBF_DONE;
1498 return -EIO;
1499 }
1500
1501 if (bp->b_flags & XBF_WRITE)
1502 xfs_buf_wait_unpin(bp);
1503
1504 /* clear the internal error state to avoid spurious errors */
1505 bp->b_io_error = 0;
1506
1507 /*
 1508	 * For synchronous IO, the IO does not inherit the submitter's reference
1509 * count, nor the buffer lock. Hence we cannot release the reference we
1510 * are about to take until we've waited for all IO completion to occur,
1511 * including any xfs_buf_ioend_async() work that may be pending.
1512 */
1513 xfs_buf_hold(bp);
1514
1515 /*
 1516	 * Set the count to 1 initially; this stops an I/O completion
1517 * callout which happens before we have started all the I/O from calling
1518 * xfs_buf_ioend too early.
1519 */
1520 atomic_set(&bp->b_io_remaining, 1);
1521 _xfs_buf_ioapply(bp);
1522
1523 /*
1524 * make sure we run completion synchronously if it raced with us and is
1525 * already complete.
1526 */
1527 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1528 xfs_buf_ioend(bp);
1529
1530 /* wait for completion before gathering the error from the buffer */
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001531 trace_xfs_buf_iowait(bp, _RET_IP_);
Dave Chinner595bff72014-10-02 09:05:14 +10001532 wait_for_completion(&bp->b_iowait);
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001533 trace_xfs_buf_iowait_done(bp, _RET_IP_);
Dave Chinner595bff72014-10-02 09:05:14 +10001534 error = bp->b_error;
1535
1536 /*
1537 * all done now, we can release the hold that keeps the buffer
1538 * referenced for the entire IO.
1539 */
1540 xfs_buf_rele(bp);
1541 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542}
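
/*
 * Usage sketch: synchronously (re)reading a buffer's contents from disk with
 * xfs_buf_submit_wait().  Unlike the async path, the submitter keeps both the
 * buffer lock and its reference across the call.  Assumes a locked,
 * referenced buffer; the helper name is illustrative only.
 */
static int
xfs_example_reread(
	struct xfs_buf		*bp)	/* locked, referenced buffer */
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DONE);
	bp->b_flags |= XBF_READ;
	return xfs_buf_submit_wait(bp);	/* returns bp->b_error */
}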
1543
Christoph Hellwig88ee2df2015-06-22 09:44:29 +10001544void *
Nathan Scottce8e9222006-01-11 15:39:08 +11001545xfs_buf_offset(
Christoph Hellwig88ee2df2015-06-22 09:44:29 +10001546 struct xfs_buf *bp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 size_t offset)
1548{
1549 struct page *page;
1550
Dave Chinner611c9942012-04-23 15:59:07 +10001551 if (bp->b_addr)
Chandra Seetharaman62926042011-07-22 23:40:15 +00001552 return bp->b_addr + offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Nathan Scottce8e9222006-01-11 15:39:08 +11001554 offset += bp->b_offset;
Dave Chinner0e6e8472011-03-26 09:16:45 +11001555 page = bp->b_pages[offset >> PAGE_SHIFT];
Christoph Hellwig88ee2df2015-06-22 09:44:29 +10001556 return page_address(page) + (offset & (PAGE_SIZE-1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557}
1558
1559/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 * Move data into or out of a buffer.
1561 */
1562void
Nathan Scottce8e9222006-01-11 15:39:08 +11001563xfs_buf_iomove(
1564 xfs_buf_t *bp, /* buffer to process */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 size_t boff, /* starting buffer offset */
1566 size_t bsize, /* length to copy */
Dave Chinnerb9c48642010-01-20 10:47:39 +11001567 void *data, /* data address */
Nathan Scottce8e9222006-01-11 15:39:08 +11001568 xfs_buf_rw_t mode) /* read/write/zero flag */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569{
Dave Chinner795cac72012-04-23 15:58:53 +10001570 size_t bend;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
1572 bend = boff + bsize;
1573 while (boff < bend) {
Dave Chinner795cac72012-04-23 15:58:53 +10001574 struct page *page;
1575 int page_index, page_offset, csize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576
Dave Chinner795cac72012-04-23 15:58:53 +10001577 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1578 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1579 page = bp->b_pages[page_index];
1580 csize = min_t(size_t, PAGE_SIZE - page_offset,
1581 BBTOB(bp->b_io_length) - boff);
1582
1583 ASSERT((csize + page_offset) <= PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584
1585 switch (mode) {
Nathan Scottce8e9222006-01-11 15:39:08 +11001586 case XBRW_ZERO:
Dave Chinner795cac72012-04-23 15:58:53 +10001587 memset(page_address(page) + page_offset, 0, csize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 break;
Nathan Scottce8e9222006-01-11 15:39:08 +11001589 case XBRW_READ:
Dave Chinner795cac72012-04-23 15:58:53 +10001590 memcpy(data, page_address(page) + page_offset, csize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 break;
Nathan Scottce8e9222006-01-11 15:39:08 +11001592 case XBRW_WRITE:
Dave Chinner795cac72012-04-23 15:58:53 +10001593 memcpy(page_address(page) + page_offset, data, csize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 }
1595
1596 boff += csize;
1597 data += csize;
1598 }
1599}
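
/*
 * Usage sketch: filling a buffer through xfs_buf_iomove(), which hides the
 * underlying page layout from the caller.  Copies a caller-supplied header
 * and zeroes the rest of the requested range.  All names and parameters here
 * are illustrative only.
 */
static void
xfs_example_fill_buf(
	struct xfs_buf		*bp,	/* buffer to initialise */
	void			*hdr,	/* header to copy in */
	size_t			hdrlen,
	size_t			buflen)	/* total bytes to initialise */
{
	xfs_buf_iomove(bp, 0, hdrlen, hdr, XBRW_WRITE);
	if (buflen > hdrlen)
		xfs_buf_iomove(bp, hdrlen, buflen - hdrlen, NULL, XBRW_ZERO);
}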
1600
1601/*
Nathan Scottce8e9222006-01-11 15:39:08 +11001602 * Handling of buffer targets (buftargs).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 */
1604
1605/*
Dave Chinner430cbeb2010-12-02 16:30:55 +11001606 * Wait for any bufs with callbacks that have been submitted but have not yet
1607 * returned. These buffers will have an elevated hold count, so wait on those
1608 * while freeing all the buffers only held by the LRU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 */
Dave Chinnere80dfa12013-08-28 10:18:05 +10001610static enum lru_status
1611xfs_buftarg_wait_rele(
1612 struct list_head *item,
Vladimir Davydov3f97b162015-02-12 14:59:35 -08001613 struct list_lru_one *lru,
Dave Chinnere80dfa12013-08-28 10:18:05 +10001614 spinlock_t *lru_lock,
1615 void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616
Dave Chinnere80dfa12013-08-28 10:18:05 +10001617{
1618 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
Dave Chinnera4082352013-08-28 10:18:06 +10001619 struct list_head *dispose = arg;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001620
1621 if (atomic_read(&bp->b_hold) > 1) {
Dave Chinnera4082352013-08-28 10:18:06 +10001622 /* need to wait, so skip it this pass */
Dave Chinnere80dfa12013-08-28 10:18:05 +10001623 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
Dave Chinnera4082352013-08-28 10:18:06 +10001624 return LRU_SKIP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 }
Dave Chinnera4082352013-08-28 10:18:06 +10001626 if (!spin_trylock(&bp->b_lock))
1627 return LRU_SKIP;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001628
Dave Chinnera4082352013-08-28 10:18:06 +10001629 /*
1630 * clear the LRU reference count so the buffer doesn't get
1631 * ignored in xfs_buf_rele().
1632 */
1633 atomic_set(&bp->b_lru_ref, 0);
1634 bp->b_state |= XFS_BSTATE_DISPOSE;
Vladimir Davydov3f97b162015-02-12 14:59:35 -08001635 list_lru_isolate_move(lru, item, dispose);
Dave Chinnera4082352013-08-28 10:18:06 +10001636 spin_unlock(&bp->b_lock);
1637 return LRU_REMOVED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638}
1639
Dave Chinnere80dfa12013-08-28 10:18:05 +10001640void
1641xfs_wait_buftarg(
1642 struct xfs_buftarg *btp)
1643{
Dave Chinnera4082352013-08-28 10:18:06 +10001644 LIST_HEAD(dispose);
1645 int loop = 0;
1646
Dave Chinner85bec542016-01-19 08:28:10 +11001647 /*
Brian Foster9c7504a2016-07-20 11:15:28 +10001648 * First wait on the buftarg I/O count for all in-flight buffers to be
1649 * released. This is critical as new buffers do not make the LRU until
1650 * they are released.
1651 *
1652 * Next, flush the buffer workqueue to ensure all completion processing
1653 * has finished. Just waiting on buffer locks is not sufficient for
1654 * async IO as the reference count held over IO is not released until
1655 * after the buffer lock is dropped. Hence we need to ensure here that
1656 * all reference counts have been dropped before we start walking the
1657 * LRU list.
Dave Chinner85bec542016-01-19 08:28:10 +11001658 */
Brian Foster9c7504a2016-07-20 11:15:28 +10001659 while (percpu_counter_sum(&btp->bt_io_count))
1660 delay(100);
Brian Foster800b2692016-08-26 16:01:59 +10001661 flush_workqueue(btp->bt_mount->m_buf_workqueue);
Dave Chinner85bec542016-01-19 08:28:10 +11001662
Dave Chinnera4082352013-08-28 10:18:06 +10001663 /* loop until there is nothing left on the lru list. */
1664 while (list_lru_count(&btp->bt_lru)) {
Dave Chinnere80dfa12013-08-28 10:18:05 +10001665 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
Dave Chinnera4082352013-08-28 10:18:06 +10001666 &dispose, LONG_MAX);
1667
1668 while (!list_empty(&dispose)) {
1669 struct xfs_buf *bp;
1670 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1671 list_del_init(&bp->b_lru);
Dave Chinnerac8809f2013-12-12 16:34:38 +11001672 if (bp->b_flags & XBF_WRITE_FAIL) {
1673 xfs_alert(btp->bt_mount,
Joe Perchesf41febd2015-07-29 11:52:04 +10001674"Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
Dave Chinnerac8809f2013-12-12 16:34:38 +11001675 (long long)bp->b_bn);
Joe Perchesf41febd2015-07-29 11:52:04 +10001676 xfs_alert(btp->bt_mount,
1677"Please run xfs_repair to determine the extent of the problem.");
Dave Chinnerac8809f2013-12-12 16:34:38 +11001678 }
Dave Chinnera4082352013-08-28 10:18:06 +10001679 xfs_buf_rele(bp);
1680 }
1681 if (loop++ != 0)
1682 delay(100);
1683 }
Dave Chinnere80dfa12013-08-28 10:18:05 +10001684}
1685
1686static enum lru_status
1687xfs_buftarg_isolate(
1688 struct list_head *item,
Vladimir Davydov3f97b162015-02-12 14:59:35 -08001689 struct list_lru_one *lru,
Dave Chinnere80dfa12013-08-28 10:18:05 +10001690 spinlock_t *lru_lock,
1691 void *arg)
1692{
1693 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1694 struct list_head *dispose = arg;
1695
1696 /*
Dave Chinnera4082352013-08-28 10:18:06 +10001697 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1698 * If we fail to get the lock, just skip it.
1699 */
1700 if (!spin_trylock(&bp->b_lock))
1701 return LRU_SKIP;
1702 /*
Dave Chinnere80dfa12013-08-28 10:18:05 +10001703 * Decrement the b_lru_ref count unless the value is already
1704 * zero. If the value is already zero, we need to reclaim the
1705 * buffer, otherwise it gets another trip through the LRU.
1706 */
Dave Chinnera4082352013-08-28 10:18:06 +10001707 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1708 spin_unlock(&bp->b_lock);
Dave Chinnere80dfa12013-08-28 10:18:05 +10001709 return LRU_ROTATE;
Dave Chinnera4082352013-08-28 10:18:06 +10001710 }
Dave Chinnere80dfa12013-08-28 10:18:05 +10001711
Dave Chinnera4082352013-08-28 10:18:06 +10001712 bp->b_state |= XFS_BSTATE_DISPOSE;
Vladimir Davydov3f97b162015-02-12 14:59:35 -08001713 list_lru_isolate_move(lru, item, dispose);
Dave Chinnera4082352013-08-28 10:18:06 +10001714 spin_unlock(&bp->b_lock);
Dave Chinnere80dfa12013-08-28 10:18:05 +10001715 return LRU_REMOVED;
1716}
1717
Andrew Mortonaddbda42013-08-28 10:18:06 +10001718static unsigned long
Dave Chinnere80dfa12013-08-28 10:18:05 +10001719xfs_buftarg_shrink_scan(
Dave Chinnerff57ab22010-11-30 17:27:57 +11001720 struct shrinker *shrink,
Ying Han1495f232011-05-24 17:12:27 -07001721 struct shrink_control *sc)
David Chinnera6867a62006-01-11 15:37:58 +11001722{
Dave Chinnerff57ab22010-11-30 17:27:57 +11001723 struct xfs_buftarg *btp = container_of(shrink,
1724 struct xfs_buftarg, bt_shrinker);
Dave Chinner430cbeb2010-12-02 16:30:55 +11001725 LIST_HEAD(dispose);
Andrew Mortonaddbda42013-08-28 10:18:06 +10001726 unsigned long freed;
Dave Chinner430cbeb2010-12-02 16:30:55 +11001727
Vladimir Davydov503c3582015-02-12 14:58:47 -08001728 freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1729 xfs_buftarg_isolate, &dispose);
Dave Chinner430cbeb2010-12-02 16:30:55 +11001730
1731 while (!list_empty(&dispose)) {
Dave Chinnere80dfa12013-08-28 10:18:05 +10001732 struct xfs_buf *bp;
Dave Chinner430cbeb2010-12-02 16:30:55 +11001733 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1734 list_del_init(&bp->b_lru);
1735 xfs_buf_rele(bp);
1736 }
1737
Dave Chinnere80dfa12013-08-28 10:18:05 +10001738 return freed;
1739}
1740
Andrew Mortonaddbda42013-08-28 10:18:06 +10001741static unsigned long
Dave Chinnere80dfa12013-08-28 10:18:05 +10001742xfs_buftarg_shrink_count(
1743 struct shrinker *shrink,
1744 struct shrink_control *sc)
1745{
1746 struct xfs_buftarg *btp = container_of(shrink,
1747 struct xfs_buftarg, bt_shrinker);
Vladimir Davydov503c3582015-02-12 14:58:47 -08001748 return list_lru_shrink_count(&btp->bt_lru, sc);
David Chinnera6867a62006-01-11 15:37:58 +11001749}
1750
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751void
1752xfs_free_buftarg(
Christoph Hellwigb7963132009-03-03 14:48:37 -05001753 struct xfs_mount *mp,
1754 struct xfs_buftarg *btp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755{
Dave Chinnerff57ab22010-11-30 17:27:57 +11001756 unregister_shrinker(&btp->bt_shrinker);
Brian Foster9c7504a2016-07-20 11:15:28 +10001757 ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
1758 percpu_counter_destroy(&btp->bt_io_count);
Glauber Costaf5e1dd32013-08-28 10:18:18 +10001759 list_lru_destroy(&btp->bt_lru);
Dave Chinnerff57ab22010-11-30 17:27:57 +11001760
Dave Chinner2291dab2016-12-09 16:49:54 +11001761 xfs_blkdev_issue_flush(btp);
David Chinnera6867a62006-01-11 15:37:58 +11001762
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10001763 kmem_free(btp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764}
1765
Eric Sandeen3fefdee2013-11-13 14:53:45 -06001766int
1767xfs_setsize_buftarg(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 xfs_buftarg_t *btp,
Eric Sandeen3fefdee2013-11-13 14:53:45 -06001769 unsigned int sectorsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770{
Eric Sandeen7c71ee72014-01-21 16:46:23 -06001771 /* Set up metadata sector size info */
Eric Sandeen6da54172014-01-21 16:45:52 -06001772 btp->bt_meta_sectorsize = sectorsize;
1773 btp->bt_meta_sectormask = sectorsize - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774
Nathan Scottce8e9222006-01-11 15:39:08 +11001775 if (set_blocksize(btp->bt_bdev, sectorsize)) {
Dave Chinner4f107002011-03-07 10:00:35 +11001776 xfs_warn(btp->bt_mount,
Dmitry Monakhova1c6f0572015-04-13 16:31:37 +04001777 "Cannot set_blocksize to %u on device %pg",
1778 sectorsize, btp->bt_bdev);
Dave Chinner24513372014-06-25 14:58:08 +10001779 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 }
1781
Eric Sandeen7c71ee72014-01-21 16:46:23 -06001782 /* Set up device logical sector size mask */
1783 btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1784 btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1785
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 return 0;
1787}
1788
1789/*
Eric Sandeen3fefdee2013-11-13 14:53:45 -06001790 * When allocating the initial buffer target we have not yet
1791 * read in the superblock, so don't know what sized sectors
1792 * are being used at this early stage. Play safe.
Nathan Scottce8e9222006-01-11 15:39:08 +11001793 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794STATIC int
1795xfs_setsize_buftarg_early(
1796 xfs_buftarg_t *btp,
1797 struct block_device *bdev)
1798{
Eric Sandeena96c4152014-04-14 19:00:29 +10001799 return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800}
1801
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802xfs_buftarg_t *
1803xfs_alloc_buftarg(
Dave Chinnerebad8612010-09-22 10:47:20 +10001804 struct xfs_mount *mp,
Eric Sandeen34dcefd2014-04-14 19:01:00 +10001805 struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806{
1807 xfs_buftarg_t *btp;
1808
Dave Chinnerb17cb362013-05-20 09:51:12 +10001809 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
Dave Chinnerebad8612010-09-22 10:47:20 +10001811 btp->bt_mount = mp;
Nathan Scottce8e9222006-01-11 15:39:08 +11001812 btp->bt_dev = bdev->bd_dev;
1813 btp->bt_bdev = bdev;
Dave Chinner0e6e8472011-03-26 09:16:45 +11001814
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 if (xfs_setsize_buftarg_early(btp, bdev))
1816 goto error;
Glauber Costa5ca302c2013-08-28 10:18:18 +10001817
1818 if (list_lru_init(&btp->bt_lru))
1819 goto error;
1820
Brian Foster9c7504a2016-07-20 11:15:28 +10001821 if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
1822 goto error;
1823
Dave Chinnere80dfa12013-08-28 10:18:05 +10001824 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1825 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
Dave Chinnerff57ab22010-11-30 17:27:57 +11001826 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
Dave Chinnere80dfa12013-08-28 10:18:05 +10001827 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
Dave Chinnerff57ab22010-11-30 17:27:57 +11001828 register_shrinker(&btp->bt_shrinker);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 return btp;
1830
1831error:
Denys Vlasenkof0e2d932008-05-19 16:31:57 +10001832 kmem_free(btp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 return NULL;
1834}
1835
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836/*
Brian Foster20e8a062017-04-21 12:40:44 -07001837 * Cancel a delayed write list.
1838 *
1839 * Remove each buffer from the list, clear the delwri queue flag and drop the
1840 * associated buffer reference.
1841 */
1842void
1843xfs_buf_delwri_cancel(
1844 struct list_head *list)
1845{
1846 struct xfs_buf *bp;
1847
1848 while (!list_empty(list)) {
1849 bp = list_first_entry(list, struct xfs_buf, b_list);
1850
1851 xfs_buf_lock(bp);
1852 bp->b_flags &= ~_XBF_DELWRI_Q;
1853 list_del_init(&bp->b_list);
1854 xfs_buf_relse(bp);
1855 }
1856}
1857
1858/*
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001859 * Add a buffer to the delayed write list.
1860 *
1861 * This queues a buffer for writeout if it hasn't already been. Note that
1862 * neither this routine nor the buffer list submission functions perform
1863 * any internal synchronization. It is expected that the lists are thread-local
1864 * to the callers.
1865 *
1866 * Returns true if we queued up the buffer, or false if it already had
1867 * been on the buffer list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 */
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001869bool
Nathan Scottce8e9222006-01-11 15:39:08 +11001870xfs_buf_delwri_queue(
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001871 struct xfs_buf *bp,
1872 struct list_head *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873{
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001874 ASSERT(xfs_buf_islocked(bp));
1875 ASSERT(!(bp->b_flags & XBF_READ));
1876
1877 /*
 1878	 * If the buffer is already marked delwri it is already queued up
 1879	 * by someone else for immediate writeout. Just ignore it in that
1880 * case.
1881 */
1882 if (bp->b_flags & _XBF_DELWRI_Q) {
1883 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1884 return false;
1885 }
David Chinnera6867a62006-01-11 15:37:58 +11001886
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001887 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1888
Dave Chinnerd808f612010-02-02 10:13:42 +11001889 /*
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001890 * If a buffer gets written out synchronously or marked stale while it
1891 * is on a delwri list we lazily remove it. To do this, the other party
1892 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1893 * It remains referenced and on the list. In a rare corner case it
 1894	 * might get re-added to a delwri list after the synchronous writeout, in
 1895	 * which case we just need to re-add the flag here.
Dave Chinnerd808f612010-02-02 10:13:42 +11001896 */
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001897 bp->b_flags |= _XBF_DELWRI_Q;
1898 if (list_empty(&bp->b_list)) {
1899 atomic_inc(&bp->b_hold);
1900 list_add_tail(&bp->b_list, list);
David Chinner585e6d82007-02-10 18:32:29 +11001901 }
David Chinner585e6d82007-02-10 18:32:29 +11001902
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001903 return true;
David Chinner585e6d82007-02-10 18:32:29 +11001904}
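
/*
 * Usage sketch: queueing a locked buffer onto a caller-private delwri list.
 * xfs_buf_delwri_queue() takes its own buffer reference, so the caller can
 * unlock and release its reference straight away.  The helper name is
 * illustrative only.
 */
static bool
xfs_example_delwri_add(
	struct xfs_buf		*bp,	/* locked, referenced buffer */
	struct list_head	*buffer_list)
{
	bool			queued;

	queued = xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);		/* the list holds its own reference */
	return queued;
}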
1905
Dave Chinner089716a2010-01-26 15:13:25 +11001906/*
1907 * Compare function is more complex than it needs to be because
1908 * the return value is only 32 bits and we are doing comparisons
1909 * on 64 bit values
1910 */
1911static int
1912xfs_buf_cmp(
1913 void *priv,
1914 struct list_head *a,
1915 struct list_head *b)
1916{
1917 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1918 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1919 xfs_daddr_t diff;
1920
Mark Tinguelyf4b42422012-12-04 17:18:02 -06001921 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
Dave Chinner089716a2010-01-26 15:13:25 +11001922 if (diff < 0)
1923 return -1;
1924 if (diff > 0)
1925 return 1;
1926 return 0;
1927}
1928
Dave Chinner26f1fe82016-06-01 17:38:15 +10001929/*
1930 * submit buffers for write.
1931 *
1932 * When we have a large buffer list, we do not want to hold all the buffers
1933 * locked while we block on the request queue waiting for IO dispatch. To avoid
1934 * this problem, we lock and submit buffers in groups of 50, thereby minimising
1935 * the lock hold times for lists which may contain thousands of objects.
1936 *
1937 * To do this, we sort the buffer list before we walk the list to lock and
1938 * submit buffers, and we plug and unplug around each group of buffers we
1939 * submit.
1940 */
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001941static int
Dave Chinner26f1fe82016-06-01 17:38:15 +10001942xfs_buf_delwri_submit_buffers(
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001943 struct list_head *buffer_list,
Dave Chinner26f1fe82016-06-01 17:38:15 +10001944 struct list_head *wait_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945{
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001946 struct xfs_buf *bp, *n;
Dave Chinner26f1fe82016-06-01 17:38:15 +10001947 LIST_HEAD (submit_list);
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001948 int pinned = 0;
Dave Chinner26f1fe82016-06-01 17:38:15 +10001949 struct blk_plug plug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950
Dave Chinner26f1fe82016-06-01 17:38:15 +10001951 list_sort(NULL, buffer_list, xfs_buf_cmp);
1952
1953 blk_start_plug(&plug);
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001954 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
Dave Chinner26f1fe82016-06-01 17:38:15 +10001955 if (!wait_list) {
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001956 if (xfs_buf_ispinned(bp)) {
1957 pinned++;
1958 continue;
1959 }
1960 if (!xfs_buf_trylock(bp))
1961 continue;
1962 } else {
1963 xfs_buf_lock(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001966 /*
1967 * Someone else might have written the buffer synchronously or
1968 * marked it stale in the meantime. In that case only the
1969 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1970 * reference and remove it from the list here.
1971 */
1972 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1973 list_del_init(&bp->b_list);
1974 xfs_buf_relse(bp);
1975 continue;
1976 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001978 trace_xfs_buf_delwri_split(bp, _RET_IP_);
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001979
Dave Chinnercf53e992014-10-02 09:04:01 +10001980 /*
Dave Chinner26f1fe82016-06-01 17:38:15 +10001981 * We do all IO submission async. This means if we need
1982 * to wait for IO completion we need to take an extra
1983 * reference so the buffer is still valid on the other
 1984	 * side. We need to move the buffer onto the wait_list
1985 * at this point so the caller can still access it.
Dave Chinnercf53e992014-10-02 09:04:01 +10001986 */
Dave Chinnerbbfeb612016-07-20 11:53:35 +10001987 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
Dave Chinner26f1fe82016-06-01 17:38:15 +10001988 bp->b_flags |= XBF_WRITE | XBF_ASYNC;
1989 if (wait_list) {
Dave Chinnercf53e992014-10-02 09:04:01 +10001990 xfs_buf_hold(bp);
Dave Chinner26f1fe82016-06-01 17:38:15 +10001991 list_move_tail(&bp->b_list, wait_list);
1992 } else
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001993 list_del_init(&bp->b_list);
Dave Chinner8dac3922014-10-02 09:04:40 +10001994
Dave Chinner595bff72014-10-02 09:05:14 +10001995 xfs_buf_submit(bp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 }
Christoph Hellwiga1b7ea52011-03-30 11:05:09 +00001997 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
Christoph Hellwig43ff2122012-04-23 15:58:39 +10001999 return pinned;
2000}
Nathan Scottf07c2252006-09-28 10:52:15 +10002001
Christoph Hellwig43ff2122012-04-23 15:58:39 +10002002/*
2003 * Write out a buffer list asynchronously.
2004 *
2005 * This will take the @buffer_list, write all non-locked and non-pinned buffers
2006 * out and not wait for I/O completion on any of the buffers. This interface
2007 * is only safely useable for callers that can track I/O completion by higher
2008 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
2009 * function.
2010 */
2011int
2012xfs_buf_delwri_submit_nowait(
2013 struct list_head *buffer_list)
2014{
Dave Chinner26f1fe82016-06-01 17:38:15 +10002015 return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
Christoph Hellwig43ff2122012-04-23 15:58:39 +10002016}
2017
2018/*
2019 * Write out a buffer list synchronously.
2020 *
2021 * This will take the @buffer_list, write all buffers out and wait for I/O
2022 * completion on all of the buffers. @buffer_list is consumed by the function,
2023 * so callers must have some other way of tracking buffers if they require such
2024 * functionality.
2025 */
2026int
2027xfs_buf_delwri_submit(
2028 struct list_head *buffer_list)
2029{
Dave Chinner26f1fe82016-06-01 17:38:15 +10002030 LIST_HEAD (wait_list);
Christoph Hellwig43ff2122012-04-23 15:58:39 +10002031 int error = 0, error2;
2032 struct xfs_buf *bp;
2033
Dave Chinner26f1fe82016-06-01 17:38:15 +10002034 xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
Christoph Hellwig43ff2122012-04-23 15:58:39 +10002035
2036 /* Wait for IO to complete. */
Dave Chinner26f1fe82016-06-01 17:38:15 +10002037 while (!list_empty(&wait_list)) {
2038 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
Christoph Hellwig43ff2122012-04-23 15:58:39 +10002039
2040 list_del_init(&bp->b_list);
Dave Chinnercf53e992014-10-02 09:04:01 +10002041
2042 /* locking the buffer will wait for async IO completion. */
2043 xfs_buf_lock(bp);
2044 error2 = bp->b_error;
Christoph Hellwig43ff2122012-04-23 15:58:39 +10002045 xfs_buf_relse(bp);
2046 if (!error)
2047 error = error2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 }
2049
Christoph Hellwig43ff2122012-04-23 15:58:39 +10002050 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051}
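
/*
 * Usage sketch: the typical two-phase pattern for the delwri interfaces -
 * buffers are queued onto a local list one at a time under their own locks,
 * then the whole list is written out and waited on in a single call.  The
 * helper and its parameters are illustrative only.
 */
static int
xfs_example_flush_bufs(
	struct xfs_buf		**bps,	/* locked buffers to write */
	int			nbufs)
{
	LIST_HEAD		(buffer_list);
	int			i;

	for (i = 0; i < nbufs; i++) {
		xfs_buf_delwri_queue(bps[i], &buffer_list);
		xfs_buf_relse(bps[i]);
	}
	/* writes everything out, waits, and empties buffer_list */
	return xfs_buf_delwri_submit(&buffer_list);
}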
2052
Brian Foster7912e7f2017-06-14 21:21:45 -07002053/*
2054 * Push a single buffer on a delwri queue.
2055 *
2056 * The purpose of this function is to submit a single buffer of a delwri queue
2057 * and return with the buffer still on the original queue. The waiting delwri
2058 * buffer submission infrastructure guarantees transfer of the delwri queue
2059 * buffer reference to a temporary wait list. We reuse this infrastructure to
2060 * transfer the buffer back to the original queue.
2061 *
2062 * Note the buffer transitions from the queued state, to the submitted and wait
2063 * listed state and back to the queued state during this call. The buffer
2064 * locking and queue management logic between _delwri_pushbuf() and
2065 * _delwri_queue() guarantee that the buffer cannot be queued to another list
2066 * before returning.
2067 */
2068int
2069xfs_buf_delwri_pushbuf(
2070 struct xfs_buf *bp,
2071 struct list_head *buffer_list)
2072{
2073 LIST_HEAD (submit_list);
2074 int error;
2075
2076 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2077
2078 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2079
2080 /*
2081 * Isolate the buffer to a new local list so we can submit it for I/O
2082 * independently from the rest of the original list.
2083 */
2084 xfs_buf_lock(bp);
2085 list_move(&bp->b_list, &submit_list);
2086 xfs_buf_unlock(bp);
2087
2088 /*
2089 * Delwri submission clears the DELWRI_Q buffer flag and returns with
2090 * the buffer on the wait list with an associated reference. Rather than
2091 * bounce the buffer from a local wait list back to the original list
2092 * after I/O completion, reuse the original list as the wait list.
2093 */
2094 xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
2095
2096 /*
2097 * The buffer is now under I/O and wait listed as during typical delwri
2098 * submission. Lock the buffer to wait for I/O completion. Rather than
2099 * remove the buffer from the wait list and release the reference, we
2100 * want to return with the buffer queued to the original list. The
2101 * buffer already sits on the original list with a wait list reference,
2102 * however. If we let the queue inherit that wait list reference, all we
2103 * need to do is reset the DELWRI_Q flag.
2104 */
2105 xfs_buf_lock(bp);
2106 error = bp->b_error;
2107 bp->b_flags |= _XBF_DELWRI_Q;
2108 xfs_buf_unlock(bp);
2109
2110 return error;
2111}
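
/*
 * Usage sketch: forcing out one specific buffer from a larger delwri queue
 * with xfs_buf_delwri_pushbuf().  On return the buffer is back on
 * @buffer_list, so normal delwri submission of the remaining queue continues
 * to work unchanged.  The helper name is illustrative only.
 */
static int
xfs_example_push_one(
	struct xfs_buf		*bp,	/* already queued on @buffer_list */
	struct list_head	*buffer_list)
{
	int			error;

	error = xfs_buf_delwri_pushbuf(bp, buffer_list);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}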
2112
Christoph Hellwig04d8b282005-11-02 10:15:05 +11002113int __init
Nathan Scottce8e9222006-01-11 15:39:08 +11002114xfs_buf_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115{
Nathan Scott87582802006-03-14 13:18:19 +11002116 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
2117 KM_ZONE_HWALIGN, NULL);
Nathan Scottce8e9222006-01-11 15:39:08 +11002118 if (!xfs_buf_zone)
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00002119 goto out;
Christoph Hellwig04d8b282005-11-02 10:15:05 +11002120
Christoph Hellwig23ea4032005-06-21 15:14:01 +10002121 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00002123 out:
Nathan Scott87582802006-03-14 13:18:19 +11002124 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125}
2126
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127void
Nathan Scottce8e9222006-01-11 15:39:08 +11002128xfs_buf_terminate(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129{
Nathan Scottce8e9222006-01-11 15:39:08 +11002130 kmem_zone_destroy(xfs_buf_zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131}