// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }


/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
} xfs_buftarg_t;
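
/*
 * Illustrative sketch, not part of this header: the *_sectormask fields are
 * assumed to hold "sector size - 1", so a byte offset can be masked to check
 * direct IO alignment against the device's logical sector size. The helper
 * name below is hypothetical.
 *
 *	static inline bool
 *	example_offset_is_aligned(struct xfs_buftarg *btp, xfs_off_t offset)
 *	{
 *		return (offset & btp->bt_logical_sectormask) == 0;
 *	}
 */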

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
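
/*
 * Illustrative sketch, not part of this header: each metadata buffer type
 * provides an xfs_buf_ops table so reads and writes are checked against the
 * on-disk format. The names below are hypothetical; real tables (e.g.
 * xfs_sb_buf_ops) live with the code that defines the on-disk structure.
 *
 *	static void example_buf_verify_read(struct xfs_buf *bp);
 *	static void example_buf_verify_write(struct xfs_buf *bp);
 *
 *	const struct xfs_buf_ops example_buf_ops = {
 *		.name		= "example",
 *		.verify_read	= example_buf_verify_read,
 *		.verify_write	= example_buf_verify_write,
 *	};
 */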

typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed.  The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only bit that is touched if we hit the
	 * semaphore fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * async write failure retry count. Initialised to zero on the first
	 * failure, then when it exceeds the maximum configured without a
	 * success the write is considered to be failed permanently and the
	 * iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffie of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors, not
	 * different errors. e.g. a block device might change ENOSPC to EIO when
	 * a failure timeout occurs, so we want to re-initialise the error
	 * retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;

#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
			   xfs_daddr_t blkno, size_t numblks,
			   xfs_buf_flags_t flags);

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}
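
/*
 * Illustrative sketch, not part of this header: a typical single-extent
 * metadata read through the wrappers above. "example_buf_ops" is a
 * hypothetical verifier table and error handling is elided; the buffer comes
 * back locked and held, so it must be released when done.
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read(target, blkno, numblks, 0, &example_buf_ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (!bp->b_error) {
 *		... use bp->b_addr ...
 *	}
 *	xfs_buf_relse(bp);
 */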

void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				int flags);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)
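
/*
 * Illustrative sketch, not part of this header: callers that cannot block
 * (e.g. to preserve lock ordering) use the trylock variant and back off
 * instead of sleeping in xfs_buf_lock():
 *
 *	if (!xfs_buf_trylock(bp))
 *		return -EAGAIN;
 *	ASSERT(xfs_buf_islocked(bp));
 *	... modify the buffer ...
 *	xfs_buf_unlock(bp);
 */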

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_submit(struct xfs_buf *bp);
extern int xfs_buf_submit_wait(struct xfs_buf *bp);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
				xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
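
/*
 * Illustrative sketch, not part of this header: synchronous write of a
 * buffer the caller already holds locked. On failure xfs_bwrite() returns a
 * negative errno; the caller still owns the lock and reference and releases
 * them as usual.
 *
 *	error = xfs_bwrite(bp);
 *	if (error)
 *		xfs_buf_ioerror_alert(bp, __func__);
 *	xfs_buf_relse(bp);
 */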

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
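
/*
 * Illustrative sketch, not part of this header: delayed writes are batched
 * on a caller-supplied list and submitted in one pass. bp1/bp2 stand in for
 * buffers the caller already holds references to.
 *
 *	LIST_HEAD(buffer_list);
 *	int		error;
 *
 *	xfs_buf_delwri_queue(bp1, &buffer_list);
 *	xfs_buf_delwri_queue(bp2, &buffer_list);
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */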

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just for the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
 * map directly. Uncached buffers are not allowed to be discontiguous, so this
 * is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the io
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
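
/*
 * Illustrative sketch, not part of this header: an uncached buffer has no
 * cache index, so its single-extent IO map is retargeted directly before
 * each submission, broadly the way the log recovery read path uses these
 * macros:
 *
 *	XFS_BUF_SET_ADDR(bp, daddr);
 *	bp->b_flags |= XBF_READ;
 *	error = xfs_buf_submit_wait(bp);
 */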

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
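
/*
 * Illustrative sketch, not part of this header: a CRC-enabled read verifier
 * typically checks the checksum before validating structure, and the write
 * verifier recomputes it just before the buffer goes to disk. The offset
 * constant and function name below are hypothetical.
 *
 *	static void
 *	example_buf_verify_read(struct xfs_buf *bp)
 *	{
 *		if (!xfs_buf_verify_cksum(bp, EXAMPLE_CRC_OFF))
 *			xfs_buf_ioerror(bp, -EFSBADCRC);
 *	}
 *
 * The matching write verifier would end with
 * xfs_buf_update_cksum(bp, EXAMPLE_CRC_OFF).
 */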

/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#endif	/* __XFS_BUF_H__ */