/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_errortag.h"
#include "xfs_error.h"

static kmem_zone_t *xfs_buf_zone;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)

static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped. A single-page buffer is
	 * never vmapped because its one page can be addressed directly, so
	 * the check has to be for both b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}
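
/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages, a three-page buffer whose data starts 512 bytes into the first
 * page (b_offset = 512) has a mapped length of 3 * 4096 - 512 = 11776
 * bytes measured from b_addr. The mapping itself covers whole pages, which
 * is why xfs_buf_free() below unmaps from b_addr - b_offset, the true
 * start of the vmap area.
 */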

/*
 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
 * this buffer. The count is incremented once per buffer (per hold cycle)
 * because the corresponding decrement is deferred to buffer release. Buffers
 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
 * tracking adds unnecessary overhead. This is used for synchronization
 * purposes with unmount (see xfs_wait_buftarg()), so all we really need is a
 * count of in-flight buffers.
 *
 * Buffers that are never released (e.g., superblock, iclog buffers) must set
 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
 * never reaches zero and unmount hangs indefinitely.
 */
static inline void
xfs_buf_ioacct_inc(
	struct xfs_buf	*bp)
{
	if (bp->b_flags & XBF_NO_IOACCT)
		return;

	ASSERT(bp->b_flags & XBF_ASYNC);
	spin_lock(&bp->b_lock);
	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
		percpu_counter_inc(&bp->b_target->bt_io_count);
	}
	spin_unlock(&bp->b_lock);
}

/*
 * Clear the in-flight state on a buffer about to be released to the LRU or
 * freed and unaccount from the buftarg.
 */
static inline void
__xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	lockdep_assert_held(&bp->b_lock);

	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
		percpu_counter_dec(&bp->b_target->bt_io_count);
	}
}

static inline void
xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);
	spin_unlock(&bp->b_lock);
}
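
/*
 * Illustrative sketch, not part of the original file: bt_io_count exists so
 * that unmount can wait for all in-flight async I/O to drain. A hypothetical,
 * simplified wait loop in the spirit of xfs_wait_buftarg() could look like
 * the helper below; the real teardown path also drains the LRU and reports
 * write errors.
 */
static void __maybe_unused
xfs_buf_example_wait_for_io(
	struct xfs_buftarg	*btp)
{
	/* percpu_counter_sum() takes the precise, non-batched total */
	while (percpu_counter_sum(&btp->bt_io_count))
		cond_resched();
}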

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	/*
	 * Once the buffer is marked stale and unlocked, a subsequent lookup
	 * could reset b_flags. There is no guarantee that the buffer is
	 * unaccounted (released to LRU) before that occurs. Drop in-flight
	 * status now to preserve accounting consistency.
	 */
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);

	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}
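
/*
 * Illustrative usage sketch (hypothetical caller, not from this file): a
 * buffer covering a just-freed extent is typically invalidated while locked
 * and then released, at which point the zeroed b_lru_ref lets it be freed
 * as soon as the last hold goes away:
 */
static void __maybe_unused
xfs_buf_example_invalidate(
	struct xfs_buf	*bp)
{
	xfs_buf_lock(bp);	/* xfs_buf_stale() asserts the lock is held */
	xfs_buf_stale(bp);
	xfs_buf_relse(bp);	/* unlock and drop our hold */
}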

static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				 KM_NOFS);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}

/*
 *	Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_li_list);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(target->bt_mount, xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}
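
/*
 * Illustrative sketch (hypothetical helper, not from this file): callers
 * normally build the map array with DEFINE_SINGLE_BUF_MAP() from xfs_buf.h,
 * so a single-extent allocation reduces to:
 */
static struct xfs_buf * __maybe_unused
xfs_buf_example_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	/* one map, no preset flags; buffer memory is allocated separately */
	return _xfs_buf_alloc(target, &map, 1, 0);
}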

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 *	Releases the specified buffer.
 *
 *	The modification state of any associated pages is left unchanged.
 *	The buffer must not be on any hash - use xfs_buf_rele instead for
 *	hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
				     bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page
 * list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * For buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = -ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
					current->comm, current->pid,
					__func__, gfp_mask);

			XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	bp->b_flags &= ~_XBF_PAGES;
	return error;
}
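
/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages, a 16 basic-block (8 KiB) buffer placed at basic block 7 covers
 * disk bytes 3584..11775. Then start = 3584 >> PAGE_SHIFT = 0 and
 * end = (11776 + 4095) >> PAGE_SHIFT = 3, so page_count = 3: one page more
 * than the 8 KiB of data itself, because the rounding guarantees the page
 * array covers every byte of an unaligned disk range.
 */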

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned nofs_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are likely to be under
		 * GFP_NOFS context here. Hence we need to tell memory reclaim
		 * that we are in such a context via PF_MEMALLOC_NOFS to
		 * prevent memory reclaim re-entering the filesystem here and
		 * potentially deadlocking.
		 */
		nofs_flag = memalloc_nofs_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_nofs_restore(nofs_flag);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}
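
/*
 * Illustrative sketch of the scoped-reclaim pattern used above (the helper
 * below is hypothetical, not from this file): any allocation that may
 * recurse into the filesystem via reclaim can be bracketed the same way,
 * making every allocation in the scope behave as if it were GFP_NOFS.
 */
static void * __maybe_unused
xfs_buf_example_nofs_alloc(
	size_t		size)
{
	unsigned	nofs_flag;
	void		*p;

	nofs_flag = memalloc_nofs_save();	/* enter NOFS scope */
	p = vmalloc(size);			/* implicitly GFP_NOFS */
	memalloc_nofs_restore(nofs_flag);	/* leave NOFS scope */
	return p;
}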

/*
 *	Finding and Reading Buffers
 */
static int
_xfs_buf_obj_cmp(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const struct xfs_buf_map	*map = arg->key;
	const struct xfs_buf		*bp = obj;

	/*
	 * The key hashing in the lookup path depends on the key being the
	 * first element of the compare_arg, make sure to assert this.
	 */
	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);

	if (bp->b_bn != map->bm_bn)
		return 1;

	if (unlikely(bp->b_length != map->bm_len)) {
		/*
		 * found a block number match. If the range doesn't
		 * match, the only way this is allowed is if the buffer
		 * in the cache is stale and the transaction that made
		 * it stale has not yet committed. i.e. we are
		 * reallocating a busy extent. Skip this buffer and
		 * continue searching for an exact match.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		return 1;
	}
	return 0;
}

static const struct rhashtable_params xfs_buf_hash_params = {
	.min_size		= 32,	/* empty AGs have minimal footprint */
	.nelem_hint		= 16,
	.key_len		= sizeof(xfs_daddr_t),
	.key_offset		= offsetof(struct xfs_buf, b_bn),
	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= _xfs_buf_obj_cmp,
};

int
xfs_buf_hash_init(
	struct xfs_perag	*pag)
{
	spin_lock_init(&pag->pag_buf_lock);
	return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
}

void
xfs_buf_hash_destroy(
	struct xfs_perag	*pag)
{
	rhashtable_destroy(&pag->pag_buf_hash);
}

/*
 *	Looks up, and creates if absent, a lockable buffer for a given
 *	range of the underlying device. The buffer is returned locked.
 *	No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	struct xfs_perag	*pag;
	xfs_buf_t		*bp;
	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
	xfs_daddr_t		eofs;
	int			i;

	for (i = 0; i < nmaps; i++)
		cmap.bm_len += map[i].bm_len;

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so
	 * we have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
		/*
		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
			  __func__, cmap.bm_bn, eofs);
		WARN_ON(1);
		return NULL;
	}

	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));

	spin_lock(&pag->pag_buf_lock);
	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
				    xfs_buf_hash_params);
	if (bp) {
		atomic_inc(&bp->b_hold);
		goto found;
	}

	/* No match found */
	if (new_bp) {
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		rhashtable_insert_fast(&pag->pag_buf_hash,
				       &new_bp->b_rhash_head,
				       xfs_buf_hash_params);
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				 "%s: failed to map pages\n", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	/*
	 * Clear b_error if this is a lookup from a caller that doesn't expect
	 * valid data to be found in the buffer.
	 */
	if (!(flags & XBF_READ))
		xfs_buf_ioerror(bp, 0);

	XFS_STATS_INC(target->bt_mount, xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}
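
/*
 * Illustrative sketch (assumption: this mirrors the xfs_buf_get() wrapper
 * declared in xfs_buf.h): the common single-extent case reduces the map
 * plumbing above to one call.
 */
static struct xfs_buf * __maybe_unused
xfs_buf_example_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, flags);
}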

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	if (flags & XBF_ASYNC) {
		xfs_buf_submit(bp);
		return 0;
	}
	return xfs_buf_submit_wait(bp);
}

xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!(bp->b_flags & XBF_DONE)) {
			XFS_STATS_INC(target->bt_mount, xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

/*
 *	If we are not low on memory then do the readahead in a
 *	deadlock-safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdev->bd_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
			 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}
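
/*
 * Illustrative sketch (assumption: this mirrors the xfs_buf_readahead()
 * wrapper in xfs_buf.h): readahead is fire-and-forget; no buffer is
 * returned and allocation or I/O failures are silently dropped, since a
 * later blocking read will fetch the data anyway.
 */
static void __maybe_unused
xfs_buf_example_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	xfs_buf_readahead_map(target, &map, 1, ops);
}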
| Dave Chinner | 5adc94c | 2010-09-24 21:58:31 +1000 | [diff] [blame] | 786 | /* | 
|  | 787 | * Read an uncached buffer from disk. Allocates and returns a locked | 
|  | 788 | * buffer containing the disk contents or nothing. | 
|  | 789 | */ | 
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	*bpp = NULL;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return -ENOMEM;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;	/* always null for uncached buffers */
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfs_buf_submit_wait(bp);
	if (bp->b_error) {
		int	error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}
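
/*
 * Example caller pattern (a sketch; "btp", "numblks" and the verifier are
 * whatever the caller has at hand, e.g. while probing the superblock at
 * mount time):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read_uncached(btp, XFS_SB_DADDR, numblks, 0, &bp,
 *				      &xfs_sb_buf_ops);
 *	if (error)
 *		return error;
 *	(inspect bp->b_addr)
 *	xfs_buf_relse(bp);
 */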

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if (!is_vmalloc_addr(addr))
		return virt_to_page(addr);
	return vmalloc_to_page(addr);
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}
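
/*
 * Sketch of the expected calling pattern (illustrative; the names are
 * hypothetical). The caller retains ownership of "mem" and must keep it
 * alive for the life of the buffer:
 *
 *	bp = xfs_buf_get_uncached(btp, numblks, 0);
 *	error = xfs_buf_associate_memory(bp, mem, len);
 *
 * Afterwards b_io_length covers "len" bytes while b_length spans the
 * page-aligned region that the page array maps.
 */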

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	/* flags might contain irrelevant bits, pass only what we care about */
	bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}
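
/*
 * Minimal sketch of driving I/O on an uncached buffer by hand - this is
 * essentially what xfs_buf_read_uncached() above does for the read case:
 *
 *	bp = xfs_buf_get_uncached(btp, numblks, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	bp->b_maps[0].bm_bn = daddr;
 *	bp->b_flags |= XBF_READ;
 *	error = xfs_buf_submit_wait(bp);
 */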

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Release a hold on the specified buffer. If the hold count is 1, the buffer
 * is placed on the LRU or freed (depending on b_lru_ref).
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;
	bool			release;
	bool			freebuf = false;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		if (atomic_dec_and_test(&bp->b_hold)) {
			xfs_buf_ioacct_dec(bp);
			xfs_buf_free(bp);
		}
		return;
	}

	ASSERT(atomic_read(&bp->b_hold) > 0);

	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
	spin_lock(&bp->b_lock);
	if (!release) {
		/*
		 * Drop the in-flight state if the buffer is already on the LRU
		 * and it holds the only reference. This is racy because we
		 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
		 * ensures the decrement occurs only once per-buf.
		 */
		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
			__xfs_buf_ioacct_dec(bp);
		goto out_unlock;
	}

	/* the last reference has been dropped ... */
	__xfs_buf_ioacct_dec(bp);
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		/*
		 * If the buffer is added to the LRU take a new reference to the
		 * buffer for the LRU and clear the (now stale) dispose list
		 * state flag
		 */
		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
			atomic_inc(&bp->b_hold);
		}
		spin_unlock(&pag->pag_buf_lock);
	} else {
		/*
		 * most of the time buffers will already be removed from the
		 * LRU, so optimise that case by checking for the
		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
		 * was on was the disposal list
		 */
		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
			list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
		} else {
			ASSERT(list_empty(&bp->b_lru));
		}

		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
		rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
				       xfs_buf_hash_params);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
		freebuf = true;
	}

out_unlock:
	spin_unlock(&bp->b_lock);

	if (freebuf)
		xfs_buf_free(bp);
}
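
/*
 * Reference counting sketch (illustrative): every xfs_buf_hold() must be
 * balanced by an xfs_buf_rele(); the reference returned by a lookup is
 * usually dropped via xfs_buf_relse(), which unlocks and then releases:
 *
 *	xfs_buf_hold(bp);	take an extra reference for another context
 *	...
 *	xfs_buf_rele(bp);	may place bp on the LRU or free it
 */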


/*
 *	Lock a buffer object, if it is not already locked.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
		trace_xfs_buf_trylock(bp, _RET_IP_);
	} else {
		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
	}
	return locked;
}

/*
 *	Lock a buffer object.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}
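
/*
 * Typical locking pattern (a sketch): try the non-blocking lock first and
 * only fall back to the blocking variant when the caller can sleep:
 *
 *	if (!xfs_buf_trylock(bp)) {
 *		if (flags & XBF_TRYLOCK)
 *			return NULL;
 *		xfs_buf_lock(bp);
 *	}
 *	(modify the buffer)
 *	xfs_buf_unlock(bp);
 */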

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

void
xfs_buf_ioend(
	struct xfs_buf	*bp)
{
	bool		read = bp->b_flags & XBF_READ;

	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

	/* Only validate buffers that were read without errors */
	if (read && !bp->b_error && bp->b_ops) {
		ASSERT(!bp->b_iodone);
		bp->b_ops->verify_read(bp);
	}

	if (!bp->b_error)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else
		complete(&bp->b_iowait);
}
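
/*
 * Completion routing summary (descriptive): exactly one of the three
 * tails above runs -
 *
 *	b_iodone set:	the item-specific callback takes over the buffer
 *	XBF_ASYNC:	the I/O owns the last lock/reference, so release it
 *	otherwise:	wake the synchronous submitter blocked on b_iowait
 */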

static void
xfs_buf_ioend_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_ioend_work);

	xfs_buf_ioend(bp);
}

static void
xfs_buf_ioend_async(
	struct xfs_buf	*bp)
{
	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
}

void
__xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error,
	xfs_failaddr_t		failaddr)
{
	ASSERT(error <= 0 && error >= -1000);
	bp->b_error = error;
	trace_xfs_buf_ioerror(bp, error, failaddr);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
			func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
			-bp->b_error);
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
			 XBF_WRITE_FAIL | XBF_DONE);

	error = xfs_buf_submit_wait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}
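
/*
 * Example synchronous write of a locked, modified buffer (a sketch;
 * shutdown on error is already handled inside xfs_bwrite()):
 *
 *	xfs_buf_lock(bp);
 *	(modify bp->b_addr contents)
 *	error = xfs_bwrite(bp);		returns with bp still locked
 *	xfs_buf_relse(bp);
 */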

static void
xfs_buf_bio_end_io(
	struct bio		*bio)
{
	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (bio->bi_status) {
		int error = blk_status_to_errno(bio->bi_status);

		cmpxchg(&bp->b_io_error, 0, error);
	}

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	if (atomic_dec_and_test(&bp->b_io_remaining))
		xfs_buf_ioend_async(bp);
	bio_put(bio);
}

static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		op,
	int		op_flags)
{
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = min(total_nr_pages, BIO_MAX_PAGES);

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio_set_dev(bio, bp->b_target->bt_bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;
	bio_set_op_attrs(bio, op, op_flags);

	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_iter.bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_submit) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, -EIO);
		bio_put(bio);
	}
}

STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		op;
	int		op_flags = 0;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	/*
	 * Initialize the I/O completion workqueue if we haven't yet or the
	 * submitter has not opted to specify a custom one.
	 */
	if (!bp->b_ioend_wq)
		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;

	if (bp->b_flags & XBF_WRITE) {
		op = REQ_OP_WRITE;
		if (bp->b_flags & XBF_SYNCIO)
			op_flags = REQ_SYNC;
		if (bp->b_flags & XBF_FUA)
			op_flags |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			op_flags |= REQ_PREFLUSH;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error
		 * and the IO will not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
			struct xfs_mount *mp = bp->b_target->bt_mount;

			/*
			 * non-crc filesystems don't attach verifiers during
			 * log recovery, so don't warn for such filesystems.
			 */
			if (xfs_sb_version_hascrc(&mp->m_sb)) {
				xfs_warn(mp,
					"%s: no buf ops on daddr 0x%llx len %d",
					__func__, bp->b_bn, bp->b_length);
				xfs_hex_dump(bp->b_addr,
						XFS_CORRUPTION_DUMP_LEN);
				dump_stack();
			}
		}
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		op = REQ_OP_READ;
		op_flags = REQ_RAHEAD;
	} else {
		op = REQ_OP_READ;
	}

	/* we only use the buffer cache for meta-data */
	op_flags |= REQ_META;

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * xfs_buf_ioapply_map() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}

/*
 * Asynchronous IO submission path. This transfers the buffer lock ownership and
 * the current reference to the IO. It is not safe to reference the buffer after
 * a call to this function unless the caller holds an additional reference
 * itself.
 */
void
xfs_buf_submit(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_submit(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
	ASSERT(bp->b_flags & XBF_ASYNC);

	/* on shutdown we stale and complete the buffer immediately */
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
		return;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * The caller's reference is released during I/O completion.
	 * This occurs some time after the last b_io_remaining reference is
	 * released, so after we drop our IO reference we have to have some
	 * other reference to ensure the buffer doesn't go away from underneath
	 * us. Take a direct reference to ensure we have safe access to the
	 * buffer until we are finished with it.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	xfs_buf_ioacct_inc(bp);
	_xfs_buf_ioapply(bp);

	/*
	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
	 * reference we took above. If we drop it to zero, run completion so
	 * that we don't return to the caller with completion still pending.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining)) {
		if (bp->b_error)
			xfs_buf_ioend(bp);
		else
			xfs_buf_ioend_async(bp);
	}

	xfs_buf_rele(bp);
	/* Note: it is not safe to reference bp now we've dropped our ref */
}
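
/*
 * Asynchronous submission sketch (illustrative): the caller marks the
 * buffer XBF_ASYNC and hands both its lock and its reference to the I/O:
 *
 *	bp->b_flags |= XBF_WRITE | XBF_ASYNC;
 *	xfs_buf_submit(bp);
 *
 * After the call bp must not be touched again unless the caller took an
 * extra xfs_buf_hold() reference beforehand.
 */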

/*
 * Synchronous buffer IO submission path, read or write.
 */
int
xfs_buf_submit_wait(
	struct xfs_buf	*bp)
{
	int		error;

	trace_xfs_buf_submit_wait(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));

	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		xfs_buf_stale(bp);
		bp->b_flags &= ~XBF_DONE;
		return -EIO;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * For synchronous IO, the IO does not inherit the submitter's
	 * reference count, nor the buffer lock. Hence we cannot release the
	 * reference we are about to take until we've waited for all IO
	 * completion to occur, including any xfs_buf_ioend_async() work that
	 * may be pending.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);

	/*
	 * make sure we run completion synchronously if it raced with us and is
	 * already complete.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining))
		xfs_buf_ioend(bp);

	/* wait for completion before gathering the error from the buffer */
	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	error = bp->b_error;

	/*
	 * all done now, we can release the hold that keeps the buffer
	 * referenced for the entire IO.
	 */
	xfs_buf_rele(bp);
	return error;
}

void *
xfs_buf_offset(
	struct xfs_buf		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return page_address(page) + (offset & (PAGE_SIZE-1));
}
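
/*
 * Example (a sketch): get a pointer to a structure at a byte offset in a
 * buffer that may not have a contiguous virtual mapping:
 *
 *	struct xfs_dsb	*dsb = xfs_buf_offset(bp, 0);
 *
 * The returned address is only usable within the page containing "offset"
 * unless the buffer is mapped (b_addr set).
 */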

/*
 *	Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + page_offset, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + page_offset, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + page_offset, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
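
/*
 * Example usage (a sketch): zero then copy out a sub-range of a buffer
 * without caring about page boundaries; XBRW_ZERO ignores "data":
 *
 *	xfs_buf_iomove(bp, boff, len, NULL, XBRW_ZERO);
 *	xfs_buf_iomove(bp, boff, len, dest, XBRW_READ);
 */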

/*
 *	Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
static enum lru_status
xfs_buftarg_wait_rele(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&bp->b_hold) > 1) {
		/* need to wait, so skip it this pass */
		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
		return LRU_SKIP;
	}
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;

	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}
|  | 1643 |  | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1644 | void | 
|  | 1645 | xfs_wait_buftarg( | 
|  | 1646 | struct xfs_buftarg	*btp) | 
|  | 1647 | { | 
| Dave Chinner | a408235 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1648 | LIST_HEAD(dispose); | 
|  | 1649 | int loop = 0; | 
|  | 1650 |  | 
| Dave Chinner | 85bec54 | 2016-01-19 08:28:10 +1100 | [diff] [blame] | 1651 | /* | 
| Brian Foster | 9c7504a | 2016-07-20 11:15:28 +1000 | [diff] [blame] | 1652 | * First wait on the buftarg I/O count for all in-flight buffers to be | 
|  | 1653 | * released. This is critical as new buffers do not make the LRU until | 
|  | 1654 | * they are released. | 
|  | 1655 | * | 
|  | 1656 | * Next, flush the buffer workqueue to ensure all completion processing | 
|  | 1657 | * has finished. Just waiting on buffer locks is not sufficient for | 
|  | 1658 | * async IO as the reference count held over IO is not released until | 
|  | 1659 | * after the buffer lock is dropped. Hence we need to ensure here that | 
|  | 1660 | * all reference counts have been dropped before we start walking the | 
|  | 1661 | * LRU list. | 
| Dave Chinner | 85bec54 | 2016-01-19 08:28:10 +1100 | [diff] [blame] | 1662 | */ | 
| Brian Foster | 9c7504a | 2016-07-20 11:15:28 +1000 | [diff] [blame] | 1663 | while (percpu_counter_sum(&btp->bt_io_count)) | 
|  | 1664 | delay(100); | 
| Brian Foster | 800b269 | 2016-08-26 16:01:59 +1000 | [diff] [blame] | 1665 | flush_workqueue(btp->bt_mount->m_buf_workqueue); | 
| Dave Chinner | 85bec54 | 2016-01-19 08:28:10 +1100 | [diff] [blame] | 1666 |  | 
| Dave Chinner | a408235 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1667 | /* loop until there is nothing left on the lru list. */ | 
|  | 1668 | while (list_lru_count(&btp->bt_lru)) { | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1669 | list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele, | 
| Dave Chinner | a408235 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1670 | &dispose, LONG_MAX); | 
|  | 1671 |  | 
|  | 1672 | while (!list_empty(&dispose)) { | 
|  | 1673 | struct xfs_buf *bp; | 
|  | 1674 | bp = list_first_entry(&dispose, struct xfs_buf, b_lru); | 
|  | 1675 | list_del_init(&bp->b_lru); | 
| Dave Chinner | ac8809f | 2013-12-12 16:34:38 +1100 | [diff] [blame] | 1676 | if (bp->b_flags & XBF_WRITE_FAIL) { | 
|  | 1677 | xfs_alert(btp->bt_mount, | 
| Darrick J. Wong | c219b01 | 2018-01-08 11:39:18 -0800 | [diff] [blame] | 1678 | "Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!", | 
| Dave Chinner | ac8809f | 2013-12-12 16:34:38 +1100 | [diff] [blame] | 1679 | (long long)bp->b_bn); | 
| Joe Perches | f41febd | 2015-07-29 11:52:04 +1000 | [diff] [blame] | 1680 | xfs_alert(btp->bt_mount, | 
|  | 1681 | "Please run xfs_repair to determine the extent of the problem."); | 
| Dave Chinner | ac8809f | 2013-12-12 16:34:38 +1100 | [diff] [blame] | 1682 | } | 
| Dave Chinner | a408235 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1683 | xfs_buf_rele(bp); | 
|  | 1684 | } | 
|  | 1685 | if (loop++ != 0) | 
|  | 1686 | delay(100); | 
|  | 1687 | } | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1688 | } | 
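
The drain is two-phase: poll the in-flight counter down to zero, then repeatedly reap the LRU with a delay between passes, because trylock failures and elevated hold counts can leave stragglers behind on any single walk. A rough userspace sketch of that shape; all names and the trivial reap step are purely illustrative:

#include <stdatomic.h>
#include <unistd.h>

static atomic_long ex_io_count;		/* in-flight I/O references */
static long ex_lru_len;			/* buffers still on the LRU */

static long
ex_lru_reap_one_pass(void)
{
	long reaped = ex_lru_len;	/* pretend every buffer was idle */

	ex_lru_len = 0;
	return reaped;
}

static void
ex_wait_target(void)
{
	int loop = 0;

	/* Phase 1: wait for in-flight I/O to drop its references. */
	while (atomic_load(&ex_io_count))
		usleep(100 * 1000);

	/* Phase 2: reap the LRU until empty, delaying on later passes. */
	while (ex_lru_len) {
		ex_lru_reap_one_pass();
		if (loop++ != 0)
			usleep(100 * 1000);
	}
}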
|  | 1689 |  | 
|  | 1690 | static enum lru_status | 
|  | 1691 | xfs_buftarg_isolate( | 
|  | 1692 | struct list_head	*item, | 
| Vladimir Davydov | 3f97b16 | 2015-02-12 14:59:35 -0800 | [diff] [blame] | 1693 | struct list_lru_one	*lru, | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1694 | spinlock_t		*lru_lock, | 
|  | 1695 | void			*arg) | 
|  | 1696 | { | 
|  | 1697 | struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru); | 
|  | 1698 | struct list_head	*dispose = arg; | 
|  | 1699 |  | 
|  | 1700 | /* | 
| Dave Chinner | a408235 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1701 | * we are inverting the lru lock/bp->b_lock here, so use a trylock. | 
|  | 1702 | * If we fail to get the lock, just skip it. | 
|  | 1703 | */ | 
|  | 1704 | if (!spin_trylock(&bp->b_lock)) | 
|  | 1705 | return LRU_SKIP; | 
|  | 1706 | /* | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1707 | * Decrement the b_lru_ref count unless the value is already | 
|  | 1708 | * zero. If the value is already zero, we need to reclaim the | 
|  | 1709 | * buffer, otherwise it gets another trip through the LRU. | 
|  | 1710 | */ | 
| Dave Chinner | a408235 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1711 | if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { | 
|  | 1712 | spin_unlock(&bp->b_lock); | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1713 | return LRU_ROTATE; | 
| Dave Chinner | a408235 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1714 | } | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1715 |  | 
| Dave Chinner | a408235 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1716 | bp->b_state |= XFS_BSTATE_DISPOSE; | 
| Vladimir Davydov | 3f97b16 | 2015-02-12 14:59:35 -0800 | [diff] [blame] | 1717 | list_lru_isolate_move(lru, item, dispose); | 
| Dave Chinner | a408235 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1718 | spin_unlock(&bp->b_lock); | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1719 | return LRU_REMOVED; | 
|  | 1720 | } | 
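
The atomic_add_unless(&bp->b_lru_ref, -1, 0) call is the aging mechanism: each shrinker pass takes one reference away, and only a buffer that has already hit zero is actually reclaimed, so recently referenced buffers keep rotating. The same decrement-unless-zero primitive can be sketched with a C11 compare-exchange loop (illustrative, not the kernel's implementation):

#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *v unless it is already zero; return true if we decremented. */
static bool
ex_dec_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;	/* aged one step: rotate on the LRU */
		/* CAS failure reloaded old; retry against the new value. */
	}
	return false;			/* already zero: dispose of the buffer */
}

A true return corresponds to LRU_ROTATE above; a false return corresponds to the isolate-and-dispose path.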
|  | 1721 |  | 
| Andrew Morton | addbda4 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1722 | static unsigned long | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1723 | xfs_buftarg_shrink_scan( | 
| Dave Chinner | ff57ab2 | 2010-11-30 17:27:57 +1100 | [diff] [blame] | 1724 | struct shrinker		*shrink, | 
| Ying Han | 1495f23 | 2011-05-24 17:12:27 -0700 | [diff] [blame] | 1725 | struct shrink_control	*sc) | 
| David Chinner | a6867a6 | 2006-01-11 15:37:58 +1100 | [diff] [blame] | 1726 | { | 
| Dave Chinner | ff57ab2 | 2010-11-30 17:27:57 +1100 | [diff] [blame] | 1727 | struct xfs_buftarg	*btp = container_of(shrink, | 
|  | 1728 | struct xfs_buftarg, bt_shrinker); | 
| Dave Chinner | 430cbeb | 2010-12-02 16:30:55 +1100 | [diff] [blame] | 1729 | LIST_HEAD(dispose); | 
| Andrew Morton | addbda4 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1730 | unsigned long		freed; | 
| Dave Chinner | 430cbeb | 2010-12-02 16:30:55 +1100 | [diff] [blame] | 1731 |  | 
| Vladimir Davydov | 503c358 | 2015-02-12 14:58:47 -0800 | [diff] [blame] | 1732 | freed = list_lru_shrink_walk(&btp->bt_lru, sc, | 
|  | 1733 | xfs_buftarg_isolate, &dispose); | 
| Dave Chinner | 430cbeb | 2010-12-02 16:30:55 +1100 | [diff] [blame] | 1734 |  | 
|  | 1735 | while (!list_empty(&dispose)) { | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1736 | struct xfs_buf *bp; | 
| Dave Chinner | 430cbeb | 2010-12-02 16:30:55 +1100 | [diff] [blame] | 1737 | bp = list_first_entry(&dispose, struct xfs_buf, b_lru); | 
|  | 1738 | list_del_init(&bp->b_lru); | 
|  | 1739 | xfs_buf_rele(bp); | 
|  | 1740 | } | 
|  | 1741 |  | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1742 | return freed; | 
|  | 1743 | } | 
|  | 1744 |  | 
| Andrew Morton | addbda4 | 2013-08-28 10:18:06 +1000 | [diff] [blame] | 1745 | static unsigned long | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1746 | xfs_buftarg_shrink_count( | 
|  | 1747 | struct shrinker		*shrink, | 
|  | 1748 | struct shrink_control	*sc) | 
|  | 1749 | { | 
|  | 1750 | struct xfs_buftarg	*btp = container_of(shrink, | 
|  | 1751 | struct xfs_buftarg, bt_shrinker); | 
| Vladimir Davydov | 503c358 | 2015-02-12 14:58:47 -0800 | [diff] [blame] | 1752 | return list_lru_shrink_count(&btp->bt_lru, sc); | 
| David Chinner | a6867a6 | 2006-01-11 15:37:58 +1100 | [diff] [blame] | 1753 | } | 
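
Shrinkers are registered as a count/scan pair: count_objects() gives the VM a cheap estimate of reclaimable objects, and scan_objects() performs the actual reclaim. Reduced to a generic interface, the split looks roughly like this (names illustrative):

struct ex_shrinker {
	/* Cheap: how many objects could plausibly be freed right now? */
	unsigned long (*count_objects)(struct ex_shrinker *s);
	/* Expensive: try to free up to nr objects; return the number freed. */
	unsigned long (*scan_objects)(struct ex_shrinker *s, unsigned long nr);
	void *private;
};

/* The caller sizes the request from count, then applies it via scan. */
static unsigned long
ex_apply_pressure(struct ex_shrinker *s, unsigned long target)
{
	unsigned long avail = s->count_objects(s);

	if (!avail)
		return 0;
	return s->scan_objects(s, target < avail ? target : avail);
}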
|  | 1754 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1755 | void | 
|  | 1756 | xfs_free_buftarg( | 
| Christoph Hellwig | b796313 | 2009-03-03 14:48:37 -0500 | [diff] [blame] | 1757 | struct xfs_mount	*mp, | 
|  | 1758 | struct xfs_buftarg	*btp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1759 | { | 
| Dave Chinner | ff57ab2 | 2010-11-30 17:27:57 +1100 | [diff] [blame] | 1760 | unregister_shrinker(&btp->bt_shrinker); | 
| Brian Foster | 9c7504a | 2016-07-20 11:15:28 +1000 | [diff] [blame] | 1761 | ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0); | 
|  | 1762 | percpu_counter_destroy(&btp->bt_io_count); | 
| Glauber Costa | f5e1dd3 | 2013-08-28 10:18:18 +1000 | [diff] [blame] | 1763 | list_lru_destroy(&btp->bt_lru); | 
| Dave Chinner | ff57ab2 | 2010-11-30 17:27:57 +1100 | [diff] [blame] | 1764 |  | 
| Dave Chinner | 2291dab | 2016-12-09 16:49:54 +1100 | [diff] [blame] | 1765 | xfs_blkdev_issue_flush(btp); | 
| David Chinner | a6867a6 | 2006-01-11 15:37:58 +1100 | [diff] [blame] | 1766 |  | 
| Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 1767 | kmem_free(btp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | } | 
|  | 1769 |  | 
| Eric Sandeen | 3fefdee | 2013-11-13 14:53:45 -0600 | [diff] [blame] | 1770 | int | 
|  | 1771 | xfs_setsize_buftarg( | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1772 | xfs_buftarg_t		*btp, | 
| Eric Sandeen | 3fefdee | 2013-11-13 14:53:45 -0600 | [diff] [blame] | 1773 | unsigned int		sectorsize) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1774 | { | 
| Eric Sandeen | 7c71ee7 | 2014-01-21 16:46:23 -0600 | [diff] [blame] | 1775 | /* Set up metadata sector size info */ | 
| Eric Sandeen | 6da5417 | 2014-01-21 16:45:52 -0600 | [diff] [blame] | 1776 | btp->bt_meta_sectorsize = sectorsize; | 
|  | 1777 | btp->bt_meta_sectormask = sectorsize - 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1778 |  | 
| Nathan Scott | ce8e922 | 2006-01-11 15:39:08 +1100 | [diff] [blame] | 1779 | if (set_blocksize(btp->bt_bdev, sectorsize)) { | 
| Dave Chinner | 4f10700 | 2011-03-07 10:00:35 +1100 | [diff] [blame] | 1780 | xfs_warn(btp->bt_mount, | 
| Dmitry Monakhov | a1c6f057 | 2015-04-13 16:31:37 +0400 | [diff] [blame] | 1781 | "Cannot set_blocksize to %u on device %pg", | 
|  | 1782 | sectorsize, btp->bt_bdev); | 
| Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1783 | return -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1784 | } | 
|  | 1785 |  | 
| Eric Sandeen | 7c71ee7 | 2014-01-21 16:46:23 -0600 | [diff] [blame] | 1786 | /* Set up device logical sector size mask */ | 
|  | 1787 | btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev); | 
|  | 1788 | btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1; | 
|  | 1789 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1790 | return 0; | 
|  | 1791 | } | 
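
Storing sectorsize - 1 as a mask works because sector sizes are powers of two, so alignment checks and rounding reduce to single AND operations. A small sketch of how such masks are typically used (hypothetical helpers, not from xfs_buf.c):

#include <stdbool.h>
#include <stdint.h>

/* sectormask is sectorsize - 1, with sectorsize a power of two. */
static bool
ex_sector_aligned(uint64_t offset, uint32_t sectormask)
{
	return (offset & sectormask) == 0;
}

static uint64_t
ex_round_down_sector(uint64_t offset, uint32_t sectormask)
{
	return offset & ~(uint64_t)sectormask;
}

static uint64_t
ex_round_up_sector(uint64_t offset, uint32_t sectormask)
{
	return (offset + sectormask) & ~(uint64_t)sectormask;
}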
|  | 1792 |  | 
|  | 1793 | /* | 
| Eric Sandeen | 3fefdee | 2013-11-13 14:53:45 -0600 | [diff] [blame] | 1794 | * When allocating the initial buffer target we have not yet | 
|  | 1795 | * read in the superblock, so we don't know what sector size | 
|  | 1796 | * is being used at this early stage.  Play safe. | 
| Nathan Scott | ce8e922 | 2006-01-11 15:39:08 +1100 | [diff] [blame] | 1797 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1798 | STATIC int | 
|  | 1799 | xfs_setsize_buftarg_early( | 
|  | 1800 | xfs_buftarg_t		*btp, | 
|  | 1801 | struct block_device	*bdev) | 
|  | 1802 | { | 
| Eric Sandeen | a96c415 | 2014-04-14 19:00:29 +1000 | [diff] [blame] | 1803 | return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1804 | } | 
|  | 1805 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1806 | xfs_buftarg_t * | 
|  | 1807 | xfs_alloc_buftarg( | 
| Dave Chinner | ebad861 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1808 | struct xfs_mount	*mp, | 
| Dan Williams | 486aff5 | 2017-08-24 15:12:50 -0700 | [diff] [blame] | 1809 | struct block_device	*bdev, | 
|  | 1810 | struct dax_device	*dax_dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1811 | { | 
|  | 1812 | xfs_buftarg_t		*btp; | 
|  | 1813 |  | 
| Dave Chinner | b17cb36 | 2013-05-20 09:51:12 +1000 | [diff] [blame] | 1814 | btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1815 |  | 
| Dave Chinner | ebad861 | 2010-09-22 10:47:20 +1000 | [diff] [blame] | 1816 | btp->bt_mount = mp; | 
| Nathan Scott | ce8e922 | 2006-01-11 15:39:08 +1100 | [diff] [blame] | 1817 | btp->bt_dev = bdev->bd_dev; | 
|  | 1818 | btp->bt_bdev = bdev; | 
| Dan Williams | 486aff5 | 2017-08-24 15:12:50 -0700 | [diff] [blame] | 1819 | btp->bt_daxdev = dax_dev; | 
| Dave Chinner | 0e6e847 | 2011-03-26 09:16:45 +1100 | [diff] [blame] | 1820 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1821 | if (xfs_setsize_buftarg_early(btp, bdev)) | 
| Michal Hocko | d210a98 | 2017-11-23 17:13:40 +0100 | [diff] [blame] | 1822 | goto error_free; | 
| Glauber Costa | 5ca302c | 2013-08-28 10:18:18 +1000 | [diff] [blame] | 1823 |  | 
|  | 1824 | if (list_lru_init(&btp->bt_lru)) | 
| Michal Hocko | d210a98 | 2017-11-23 17:13:40 +0100 | [diff] [blame] | 1825 | goto error_free; | 
| Glauber Costa | 5ca302c | 2013-08-28 10:18:18 +1000 | [diff] [blame] | 1826 |  | 
| Brian Foster | 9c7504a | 2016-07-20 11:15:28 +1000 | [diff] [blame] | 1827 | if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL)) | 
| Michal Hocko | d210a98 | 2017-11-23 17:13:40 +0100 | [diff] [blame] | 1828 | goto error_lru; | 
| Brian Foster | 9c7504a | 2016-07-20 11:15:28 +1000 | [diff] [blame] | 1829 |  | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1830 | btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count; | 
|  | 1831 | btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan; | 
| Dave Chinner | ff57ab2 | 2010-11-30 17:27:57 +1100 | [diff] [blame] | 1832 | btp->bt_shrinker.seeks = DEFAULT_SEEKS; | 
| Dave Chinner | e80dfa1 | 2013-08-28 10:18:05 +1000 | [diff] [blame] | 1833 | btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE; | 
| Michal Hocko | d210a98 | 2017-11-23 17:13:40 +0100 | [diff] [blame] | 1834 | if (register_shrinker(&btp->bt_shrinker)) | 
|  | 1835 | goto error_pcpu; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1836 | return btp; | 
|  | 1837 |  | 
| Michal Hocko | d210a98 | 2017-11-23 17:13:40 +0100 | [diff] [blame] | 1838 | error_pcpu: | 
|  | 1839 | percpu_counter_destroy(&btp->bt_io_count); | 
|  | 1840 | error_lru: | 
|  | 1841 | list_lru_destroy(&btp->bt_lru); | 
|  | 1842 | error_free: | 
| Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 1843 | kmem_free(btp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1844 | return NULL; | 
|  | 1845 | } | 
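
The error path above is the classic kernel goto ladder: each successfully acquired resource adds an unwind label, and a failure jumps to the label that releases everything acquired so far, in reverse order, so each cleanup appears exactly once. A generic sketch of the pattern with illustrative names and resources:

#include <stdlib.h>

struct ex_target { void *a; void *b; void *c; };

static struct ex_target *
ex_alloc_target(void)
{
	struct ex_target *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	if (!(t->a = malloc(64)))
		goto err_free;
	if (!(t->b = malloc(64)))
		goto err_a;
	if (!(t->c = malloc(64)))
		goto err_b;
	return t;

err_b:
	free(t->b);	/* c failed: release b, then fall through */
err_a:
	free(t->a);	/* b or c failed: release a */
err_free:
	free(t);
	return NULL;
}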
|  | 1846 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1847 | /* | 
| Brian Foster | 20e8a06 | 2017-04-21 12:40:44 -0700 | [diff] [blame] | 1848 | * Cancel a delayed write list. | 
|  | 1849 | * | 
|  | 1850 | * Remove each buffer from the list, clear the delwri queue flag and drop the | 
|  | 1851 | * associated buffer reference. | 
|  | 1852 | */ | 
|  | 1853 | void | 
|  | 1854 | xfs_buf_delwri_cancel( | 
|  | 1855 | struct list_head	*list) | 
|  | 1856 | { | 
|  | 1857 | struct xfs_buf		*bp; | 
|  | 1858 |  | 
|  | 1859 | while (!list_empty(list)) { | 
|  | 1860 | bp = list_first_entry(list, struct xfs_buf, b_list); | 
|  | 1861 |  | 
|  | 1862 | xfs_buf_lock(bp); | 
|  | 1863 | bp->b_flags &= ~_XBF_DELWRI_Q; | 
|  | 1864 | list_del_init(&bp->b_list); | 
|  | 1865 | xfs_buf_relse(bp); | 
|  | 1866 | } | 
|  | 1867 | } | 
|  | 1868 |  | 
|  | 1869 | /* | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1870 | * Add a buffer to the delayed write list. | 
|  | 1871 | * | 
|  | 1872 | * This queues a buffer for writeout if it hasn't already been queued.  Note that | 
|  | 1873 | * neither this routine nor the buffer list submission functions perform | 
|  | 1874 | * any internal synchronization.  It is expected that the lists are thread-local | 
|  | 1875 | * to the callers. | 
|  | 1876 | * | 
|  | 1877 | * Returns true if we queued up the buffer, or false if it was already | 
|  | 1878 | * on the buffer list. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1879 | */ | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1880 | bool | 
| Nathan Scott | ce8e922 | 2006-01-11 15:39:08 +1100 | [diff] [blame] | 1881 | xfs_buf_delwri_queue( | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1882 | struct xfs_buf		*bp, | 
|  | 1883 | struct list_head	*list) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1884 | { | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1885 | ASSERT(xfs_buf_islocked(bp)); | 
|  | 1886 | ASSERT(!(bp->b_flags & XBF_READ)); | 
|  | 1887 |  | 
|  | 1888 | /* | 
|  | 1889 | * If the buffer is already marked delwri it is already queued up | 
|  | 1890 | * by someone else for immediate writeout.  Just ignore it in that | 
|  | 1891 | * case. | 
|  | 1892 | */ | 
|  | 1893 | if (bp->b_flags & _XBF_DELWRI_Q) { | 
|  | 1894 | trace_xfs_buf_delwri_queued(bp, _RET_IP_); | 
|  | 1895 | return false; | 
|  | 1896 | } | 
| David Chinner | a6867a6 | 2006-01-11 15:37:58 +1100 | [diff] [blame] | 1897 |  | 
| Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 1898 | trace_xfs_buf_delwri_queue(bp, _RET_IP_); | 
|  | 1899 |  | 
| Dave Chinner | d808f61 | 2010-02-02 10:13:42 +1100 | [diff] [blame] | 1900 | /* | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1901 | * If a buffer gets written out synchronously or marked stale while it | 
|  | 1902 | * is on a delwri list we lazily remove it. To do this, the other party | 
|  | 1903 | * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone. | 
|  | 1904 | * It remains referenced and on the list.  In a rare corner case it | 
|  | 1905 | * might get re-added to a delwri list after the synchronous writeout, in | 
|  | 1906 | * which case we just need to re-add the flag here. | 
| Dave Chinner | d808f61 | 2010-02-02 10:13:42 +1100 | [diff] [blame] | 1907 | */ | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1908 | bp->b_flags |= _XBF_DELWRI_Q; | 
|  | 1909 | if (list_empty(&bp->b_list)) { | 
|  | 1910 | atomic_inc(&bp->b_hold); | 
|  | 1911 | list_add_tail(&bp->b_list, list); | 
| David Chinner | 585e6d8 | 2007-02-10 18:32:29 +1100 | [diff] [blame] | 1912 | } | 
| David Chinner | 585e6d8 | 2007-02-10 18:32:29 +1100 | [diff] [blame] | 1913 |  | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1914 | return true; | 
| David Chinner | 585e6d8 | 2007-02-10 18:32:29 +1100 | [diff] [blame] | 1915 | } | 
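
Putting the pieces together, a typical caller batches locked, dirty buffers onto a thread-local list and then submits the whole list at once. Below is a hedged sketch of that calling pattern in kernel context; ex_flush_buffers is illustrative, while the queue, release and submit helpers are the real ones from this file:

/* Assumes each bps[i] is a locked buffer the caller holds a reference to. */
static int
ex_flush_buffers(struct xfs_buf **bps, int nr)
{
	LIST_HEAD(buffer_list);		/* thread-local: no locking needed */
	int i;

	for (i = 0; i < nr; i++) {
		xfs_buf_delwri_queue(bps[i], &buffer_list); /* takes its own hold */
		xfs_buf_relse(bps[i]);	/* drop our lock and reference */
	}

	/* Sorts, plugs, submits and waits; returns the first I/O error. */
	return xfs_buf_delwri_submit(&buffer_list);
}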
|  | 1916 |  | 
| Dave Chinner | 089716a | 2010-01-26 15:13:25 +1100 | [diff] [blame] | 1917 | /* | 
|  | 1918 | * The compare function is more complex than it needs to be because | 
|  | 1919 | * the return value is only 32 bits wide and we are comparing | 
|  | 1920 | * 64-bit values. | 
|  | 1921 | */ | 
|  | 1922 | static int | 
|  | 1923 | xfs_buf_cmp( | 
|  | 1924 | void		*priv, | 
|  | 1925 | struct list_head *a, | 
|  | 1926 | struct list_head *b) | 
|  | 1927 | { | 
|  | 1928 | struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list); | 
|  | 1929 | struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list); | 
|  | 1930 | xfs_daddr_t		diff; | 
|  | 1931 |  | 
| Mark Tinguely | f4b4242 | 2012-12-04 17:18:02 -0600 | [diff] [blame] | 1932 | diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; | 
| Dave Chinner | 089716a | 2010-01-26 15:13:25 +1100 | [diff] [blame] | 1933 | if (diff < 0) | 
|  | 1934 | return -1; | 
|  | 1935 | if (diff > 0) | 
|  | 1936 | return 1; | 
|  | 1937 | return 0; | 
|  | 1938 | } | 
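
The warning in the comment is about a concrete pitfall: casting the 64-bit difference straight to the 32-bit return value truncates it, so two block numbers exactly 2^32 sectors apart would compare as equal, and large differences can even flip sign. A quick userspace demonstration (the truncated result shown is what common two's-complement ABIs produce):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t a = 0;
	int64_t b = (int64_t)1 << 32;	/* 2^32 sectors apart */
	int64_t diff = a - b;		/* -4294967296 */

	/* Buggy: the low 32 bits of the difference are all zero. */
	printf("truncated cmp: %d\n", (int)diff);	/* prints 0: "equal" */

	/* Safe: reduce the sign of the full 64-bit diff to -1/0/1. */
	printf("safe cmp: %d\n", diff < 0 ? -1 : diff > 0 ? 1 : 0);
	return 0;
}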
|  | 1939 |  | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1940 | /* | 
|  | 1941 | * Submit buffers for write. | 
|  | 1942 | * | 
|  | 1943 | * When we have a large buffer list, we do not want to hold all the buffers | 
|  | 1944 | * locked while we block on the request queue waiting for IO dispatch. To avoid | 
|  | 1945 | * this problem, we lock and submit buffers in groups of 50, thereby minimising | 
|  | 1946 | * the lock hold times for lists which may contain thousands of objects. | 
|  | 1947 | * | 
|  | 1948 | * To do this, we sort the buffer list before we walk the list to lock and | 
|  | 1949 | * submit buffers, and we plug and unplug around each group of buffers we | 
|  | 1950 | * submit. | 
|  | 1951 | */ | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1952 | static int | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1953 | xfs_buf_delwri_submit_buffers( | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1954 | struct list_head	*buffer_list, | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1955 | struct list_head	*wait_list) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1956 | { | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1957 | struct xfs_buf		*bp, *n; | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1958 | LIST_HEAD		(submit_list); | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1959 | int			pinned = 0; | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1960 | struct blk_plug		plug; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1961 |  | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1962 | list_sort(NULL, buffer_list, xfs_buf_cmp); | 
|  | 1963 |  | 
|  | 1964 | blk_start_plug(&plug); | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1965 | list_for_each_entry_safe(bp, n, buffer_list, b_list) { | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1966 | if (!wait_list) { | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1967 | if (xfs_buf_ispinned(bp)) { | 
|  | 1968 | pinned++; | 
|  | 1969 | continue; | 
|  | 1970 | } | 
|  | 1971 | if (!xfs_buf_trylock(bp)) | 
|  | 1972 | continue; | 
|  | 1973 | } else { | 
|  | 1974 | xfs_buf_lock(bp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1975 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1976 |  | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1977 | /* | 
|  | 1978 | * Someone else might have written the buffer synchronously or | 
|  | 1979 | * marked it stale in the meantime.  In that case only the | 
|  | 1980 | * _XBF_DELWRI_Q flag got cleared, and we have to drop the | 
|  | 1981 | * reference and remove it from the list here. | 
|  | 1982 | */ | 
|  | 1983 | if (!(bp->b_flags & _XBF_DELWRI_Q)) { | 
|  | 1984 | list_del_init(&bp->b_list); | 
|  | 1985 | xfs_buf_relse(bp); | 
|  | 1986 | continue; | 
|  | 1987 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1988 |  | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1989 | trace_xfs_buf_delwri_split(bp, _RET_IP_); | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 1990 |  | 
| Dave Chinner | cf53e99 | 2014-10-02 09:04:01 +1000 | [diff] [blame] | 1991 | /* | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1992 | * We do all IO submission async. This means if we need | 
|  | 1993 | * to wait for IO completion we need to take an extra | 
|  | 1994 | * reference so the buffer is still valid on the other | 
|  | 1995 | * side. We need to move the buffer onto the wait list | 
|  | 1996 | * at this point so the caller can still access it. | 
| Dave Chinner | cf53e99 | 2014-10-02 09:04:01 +1000 | [diff] [blame] | 1997 | */ | 
| Dave Chinner | bbfeb61 | 2016-07-20 11:53:35 +1000 | [diff] [blame] | 1998 | bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL); | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 1999 | bp->b_flags |= XBF_WRITE | XBF_ASYNC; | 
|  | 2000 | if (wait_list) { | 
| Dave Chinner | cf53e99 | 2014-10-02 09:04:01 +1000 | [diff] [blame] | 2001 | xfs_buf_hold(bp); | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 2002 | list_move_tail(&bp->b_list, wait_list); | 
|  | 2003 | } else | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2004 | list_del_init(&bp->b_list); | 
| Dave Chinner | 8dac392 | 2014-10-02 09:04:40 +1000 | [diff] [blame] | 2005 |  | 
| Dave Chinner | 595bff7 | 2014-10-02 09:05:14 +1000 | [diff] [blame] | 2006 | xfs_buf_submit(bp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2007 | } | 
| Christoph Hellwig | a1b7ea5 | 2011-03-30 11:05:09 +0000 | [diff] [blame] | 2008 | blk_finish_plug(&plug); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2009 |  | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2010 | return pinned; | 
|  | 2011 | } | 
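
The blk_start_plug()/blk_finish_plug() bracket lets the block layer accumulate the async submissions issued inside it and dispatch them as merged, sorted batches instead of one request at a time. The buffering idea, stripped down to a toy userspace analogue (everything here is illustrative):

#include <stdio.h>

#define EX_BATCH	8

struct ex_plug { int reqs[EX_BATCH]; int nr; };

static void
ex_flush(struct ex_plug *p)
{
	if (p->nr)
		printf("dispatching %d merged requests\n", p->nr);
	p->nr = 0;
}

/* Queue into the plug; dispatch only when it fills or is unplugged. */
static void
ex_submit(struct ex_plug *p, int req)
{
	p->reqs[p->nr++] = req;
	if (p->nr == EX_BATCH)
		ex_flush(p);
}

int main(void)
{
	struct ex_plug plug = { .nr = 0 };	/* blk_start_plug() analogue */
	int i;

	for (i = 0; i < 20; i++)
		ex_submit(&plug, i);
	ex_flush(&plug);			/* blk_finish_plug() analogue */
	return 0;
}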
| Nathan Scott | f07c225 | 2006-09-28 10:52:15 +1000 | [diff] [blame] | 2012 |  | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2013 | /* | 
|  | 2014 | * Write out a buffer list asynchronously. | 
|  | 2015 | * | 
|  | 2016 | * This will take the @buffer_list, write all non-locked and non-pinned buffers | 
|  | 2017 | * out and not wait for I/O completion on any of the buffers.  This interface | 
|  | 2018 | * is only safely usable for callers that can track I/O completion by higher | 
|  | 2019 | * level means, e.g. AIL pushing as the @buffer_list is consumed in this | 
|  | 2020 | * function. | 
|  | 2021 | */ | 
|  | 2022 | int | 
|  | 2023 | xfs_buf_delwri_submit_nowait( | 
|  | 2024 | struct list_head	*buffer_list) | 
|  | 2025 | { | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 2026 | return xfs_buf_delwri_submit_buffers(buffer_list, NULL); | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2027 | } | 
|  | 2028 |  | 
|  | 2029 | /* | 
|  | 2030 | * Write out a buffer list synchronously. | 
|  | 2031 | * | 
|  | 2032 | * This will take the @buffer_list, write all buffers out and wait for I/O | 
|  | 2033 | * completion on all of the buffers. @buffer_list is consumed by the function, | 
|  | 2034 | * so callers must have some other way of tracking buffers if they require such | 
|  | 2035 | * functionality. | 
|  | 2036 | */ | 
|  | 2037 | int | 
|  | 2038 | xfs_buf_delwri_submit( | 
|  | 2039 | struct list_head	*buffer_list) | 
|  | 2040 | { | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 2041 | LIST_HEAD		(wait_list); | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2042 | int			error = 0, error2; | 
|  | 2043 | struct xfs_buf		*bp; | 
|  | 2044 |  | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 2045 | xfs_buf_delwri_submit_buffers(buffer_list, &wait_list); | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2046 |  | 
|  | 2047 | /* Wait for IO to complete. */ | 
| Dave Chinner | 26f1fe8 | 2016-06-01 17:38:15 +1000 | [diff] [blame] | 2048 | while (!list_empty(&wait_list)) { | 
|  | 2049 | bp = list_first_entry(&wait_list, struct xfs_buf, b_list); | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2050 |  | 
|  | 2051 | list_del_init(&bp->b_list); | 
| Dave Chinner | cf53e99 | 2014-10-02 09:04:01 +1000 | [diff] [blame] | 2052 |  | 
|  | 2053 | /* locking the buffer will wait for async IO completion. */ | 
|  | 2054 | xfs_buf_lock(bp); | 
|  | 2055 | error2 = bp->b_error; | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2056 | xfs_buf_relse(bp); | 
|  | 2057 | if (!error) | 
|  | 2058 | error = error2; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2059 | } | 
|  | 2060 |  | 
| Christoph Hellwig | 43ff212 | 2012-04-23 15:58:39 +1000 | [diff] [blame] | 2061 | return error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2062 | } | 
|  | 2063 |  | 
| Brian Foster | 7912e7f | 2017-06-14 21:21:45 -0700 | [diff] [blame] | 2064 | /* | 
|  | 2065 | * Push a single buffer on a delwri queue. | 
|  | 2066 | * | 
|  | 2067 | * The purpose of this function is to submit a single buffer of a delwri queue | 
|  | 2068 | * and return with the buffer still on the original queue. The waiting delwri | 
|  | 2069 | * buffer submission infrastructure guarantees transfer of the delwri queue | 
|  | 2070 | * buffer reference to a temporary wait list. We reuse this infrastructure to | 
|  | 2071 | * transfer the buffer back to the original queue. | 
|  | 2072 | * | 
|  | 2073 | * Note that the buffer transitions from the queued state to the submitted and | 
|  | 2074 | * wait-listed state and back to the queued state during this call. The buffer | 
|  | 2075 | * locking and queue management logic between _delwri_pushbuf() and | 
|  | 2076 | * _delwri_queue() guarantee that the buffer cannot be queued to another list | 
|  | 2077 | * before returning. | 
|  | 2078 | */ | 
|  | 2079 | int | 
|  | 2080 | xfs_buf_delwri_pushbuf( | 
|  | 2081 | struct xfs_buf		*bp, | 
|  | 2082 | struct list_head	*buffer_list) | 
|  | 2083 | { | 
|  | 2084 | LIST_HEAD		(submit_list); | 
|  | 2085 | int			error; | 
|  | 2086 |  | 
|  | 2087 | ASSERT(bp->b_flags & _XBF_DELWRI_Q); | 
|  | 2088 |  | 
|  | 2089 | trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_); | 
|  | 2090 |  | 
|  | 2091 | /* | 
|  | 2092 | * Isolate the buffer to a new local list so we can submit it for I/O | 
|  | 2093 | * independently from the rest of the original list. | 
|  | 2094 | */ | 
|  | 2095 | xfs_buf_lock(bp); | 
|  | 2096 | list_move(&bp->b_list, &submit_list); | 
|  | 2097 | xfs_buf_unlock(bp); | 
|  | 2098 |  | 
|  | 2099 | /* | 
|  | 2100 | * Delwri submission clears the DELWRI_Q buffer flag and returns with | 
|  | 2101 | * the buffer on the wait list with an associated reference. Rather than | 
|  | 2102 | * bounce the buffer from a local wait list back to the original list | 
|  | 2103 | * after I/O completion, reuse the original list as the wait list. | 
|  | 2104 | */ | 
|  | 2105 | xfs_buf_delwri_submit_buffers(&submit_list, buffer_list); | 
|  | 2106 |  | 
|  | 2107 | /* | 
|  | 2108 | * The buffer is now under I/O and wait listed as during typical delwri | 
|  | 2109 | * submission. Lock the buffer to wait for I/O completion. Rather than | 
|  | 2110 | * remove the buffer from the wait list and release the reference, we | 
|  | 2111 | * want to return with the buffer queued to the original list. The | 
|  | 2112 | * buffer already sits on the original list with a wait list reference, | 
|  | 2113 | * however. If we let the queue inherit that wait list reference, all we | 
|  | 2114 | * need to do is reset the DELWRI_Q flag. | 
|  | 2115 | */ | 
|  | 2116 | xfs_buf_lock(bp); | 
|  | 2117 | error = bp->b_error; | 
|  | 2118 | bp->b_flags |= _XBF_DELWRI_Q; | 
|  | 2119 | xfs_buf_unlock(bp); | 
|  | 2120 |  | 
|  | 2121 | return error; | 
|  | 2122 | } | 
|  | 2123 |  | 
| Christoph Hellwig | 04d8b28 | 2005-11-02 10:15:05 +1100 | [diff] [blame] | 2124 | int __init | 
| Nathan Scott | ce8e922 | 2006-01-11 15:39:08 +1100 | [diff] [blame] | 2125 | xfs_buf_init(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2126 | { | 
| Nathan Scott | 8758280 | 2006-03-14 13:18:19 +1100 | [diff] [blame] | 2127 | xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", | 
|  | 2128 | KM_ZONE_HWALIGN, NULL); | 
| Nathan Scott | ce8e922 | 2006-01-11 15:39:08 +1100 | [diff] [blame] | 2129 | if (!xfs_buf_zone) | 
| Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 2130 | goto out; | 
| Christoph Hellwig | 04d8b28 | 2005-11-02 10:15:05 +1100 | [diff] [blame] | 2131 |  | 
| Christoph Hellwig | 23ea403 | 2005-06-21 15:14:01 +1000 | [diff] [blame] | 2132 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2133 |  | 
| Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 2134 | out: | 
| Nathan Scott | 8758280 | 2006-03-14 13:18:19 +1100 | [diff] [blame] | 2135 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2136 | } | 
|  | 2137 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2138 | void | 
| Nathan Scott | ce8e922 | 2006-01-11 15:39:08 +1100 | [diff] [blame] | 2139 | xfs_buf_terminate(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2140 | { | 
| Nathan Scott | ce8e922 | 2006-01-11 15:39:08 +1100 | [diff] [blame] | 2141 | kmem_zone_destroy(xfs_buf_zone); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2142 | } | 
| Brian Foster | 7561d27 | 2017-10-17 14:16:29 -0700 | [diff] [blame] | 2143 |  | 
|  | 2144 | void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) | 
|  | 2145 | { | 
| Brian Foster | 7561d27 | 2017-10-17 14:16:29 -0700 | [diff] [blame] | 2146 | /* | 
|  | 2147 | * Set the lru reference count to 0 based on the error injection tag. | 
|  | 2148 | * This allows userspace to disrupt buffer caching for debug/testing | 
|  | 2149 | * purposes. | 
|  | 2150 | */ | 
| Brian Foster | 4eadcf9 | 2017-10-27 09:20:28 -0700 | [diff] [blame] | 2151 | if (XFS_TEST_ERROR(false, bp->b_target->bt_mount, | 
|  | 2152 | XFS_ERRTAG_BUF_LRU_REF)) | 
| Brian Foster | 7561d27 | 2017-10-17 14:16:29 -0700 | [diff] [blame] | 2153 | lru_ref = 0; | 
|  | 2154 |  | 
|  | 2155 | atomic_set(&bp->b_lru_ref, lru_ref); | 
|  | 2156 | } | 
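
Callers use this hook to bias cache retention: the initial b_lru_ref value is the number of shrinker passes a buffer survives before it becomes reclaimable, so hot metadata can be given a larger value than the default. A hedged usage sketch follows; the constant is hypothetical, not a kernel define:

#define EX_HOT_METADATA_REF	4	/* illustrative retention value */

/* Hypothetical caller keeping a frequently hit buffer cached longer. */
static void
ex_mark_buffer_hot(struct xfs_buf *bp)
{
	xfs_buf_set_ref(bp, EX_HOT_METADATA_REF);
}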