#ifndef __EXTENTIO__
#define __EXTENTIO__

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_WRITEBACK	(1U << 1)
#define EXTENT_UPTODATE		(1U << 2)
#define EXTENT_LOCKED		(1U << 3)
#define EXTENT_NEW		(1U << 4)
#define EXTENT_DELALLOC		(1U << 5)
#define EXTENT_DEFRAG		(1U << 6)
#define EXTENT_BOUNDARY		(1U << 9)
#define EXTENT_NODATASUM	(1U << 10)
#define EXTENT_CLEAR_META_RESV	(1U << 11)
#define EXTENT_FIRST_DELALLOC	(1U << 12)
#define EXTENT_NEED_WAIT	(1U << 13)
#define EXTENT_DAMAGED		(1U << 14)
#define EXTENT_NORESERVE	(1U << 15)
#define EXTENT_QGROUP_RESERVED	(1U << 16)
#define EXTENT_CLEAR_DATA_RESV	(1U << 17)
#define EXTENT_DELALLOC_NEW	(1U << 18)
#define EXTENT_IOBITS		(EXTENT_LOCKED | EXTENT_WRITEBACK)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_TREE_LOG 2
#define EXTENT_BIO_FLAG_SHIFT 16

/* these are bit numbers for test/set bit */
#define EXTENT_BUFFER_UPTODATE 0
#define EXTENT_BUFFER_DIRTY 2
#define EXTENT_BUFFER_CORRUPT 3
#define EXTENT_BUFFER_READAHEAD 4	/* this got triggered by readahead */
#define EXTENT_BUFFER_TREE_REF 5
#define EXTENT_BUFFER_STALE 6
#define EXTENT_BUFFER_WRITEBACK 7
#define EXTENT_BUFFER_READ_ERR 8	/* read IO error */
#define EXTENT_BUFFER_DUMMY 9
#define EXTENT_BUFFER_IN_TREE 10
#define EXTENT_BUFFER_WRITE_ERR 11	/* write IO error */
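
/*
 * These flag bits live in extent_buffer::bflags and are manipulated with
 * the generic bitops; a minimal sketch (illustrative only):
 *
 *	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 *		;	// buffer contents are known to be valid
 *	set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
 */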

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1
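
/*
 * A sketch of what set_page_extent_mapped() (declared below) does with
 * this value, assuming the usual pagecache helpers (illustrative only):
 *
 *	if (!PagePrivate(page)) {
 *		SetPagePrivate(page);
 *		get_page(page);
 *		set_page_private(page, EXTENT_PAGE_PRIVATE);
 *	}
 */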

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

static inline int le_test_bit(int nr, const u8 *addr)
{
	return 1U & (addr[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE-1)));
}

extern void le_bitmap_set(u8 *map, unsigned int start, int len);
extern void le_bitmap_clear(u8 *map, unsigned int start, int len);
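
/*
 * Worked example of the byte-granularity helpers (illustrative only):
 * BITMAP_FIRST_BYTE_MASK(3) == 0xf8 (bits 3..7 of the first byte) and
 * BITMAP_LAST_BYTE_MASK(11) == 0x07 (bits 0..2 of the last byte), so
 * setting bits [3, 11) touches 0xf8 of byte 0 and 0x07 of byte 1:
 *
 *	u8 map[2] = {0};
 *
 *	le_bitmap_set(map, 3, 8);	// map[0] == 0xf8, map[1] == 0x07
 *	le_test_bit(10, map);		// 1: bit 2 of map[1] is set
 */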

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef	int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio,
				       int mirror_num, unsigned long bio_flags,
				       u64 bio_offset);
struct extent_io_ops {
	/*
	 * The following callbacks must always be defined; these function
	 * pointers are called unconditionally.
	 */
	extent_submit_bio_hook_t *submit_bio_hook;
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
	int (*merge_bio_hook)(struct page *page, unsigned long offset,
			      size_t size, struct bio *bio,
			      unsigned long bio_flags);
	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);

	/*
	 * Optional hooks, called if the pointer is not NULL
	 */
	int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
			     u64 start, u64 end, int *page_started,
			     unsigned long *nr_written);

	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
	void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
				      struct extent_state *state, int uptodate);
	void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
			     unsigned *bits);
	void (*clear_bit_hook)(struct btrfs_inode *inode,
			       struct extent_state *state,
			       unsigned *bits);
	void (*merge_extent_hook)(struct inode *inode,
				  struct extent_state *new,
				  struct extent_state *other);
	void (*split_extent_hook)(struct inode *inode,
				  struct extent_state *orig, u64 split);
};

struct extent_io_tree {
	struct rb_root state;
	struct address_space *mapping;
	u64 dirty_bytes;
	int track_uptodate;
	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	/* counts of lock holders and waiters on the extent buffer */
	atomic_t write_locks;
	atomic_t read_locks;
	atomic_t blocking_writers;
	atomic_t blocking_readers;
	atomic_t spinning_readers;
	atomic_t spinning_writers;
	short lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers wait on write_lock_wq for the write lock
	 * holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	u64 bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};
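
/*
 * A minimal lifecycle sketch for an extent_changeset, assuming the
 * ulist_init()/ulist_release() helpers from ulist.h (illustrative only):
 *
 *	struct extent_changeset changeset;
 *
 *	changeset.bytes_changed = 0;
 *	ulist_init(&changeset.range_changed);
 *	set_record_extent_bits(tree, start, end, bits, &changeset);
 *	...
 *	ulist_release(&changeset.range_changed);
 */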

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
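
/*
 * Round-trip sketch for the bio flag encoding above, assuming the
 * BTRFS_COMPRESS_ZLIB constant from ctree.h (illustrative only):
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	extent_compress_type(bio_flags);	// == BTRFS_COMPRESS_ZLIB
 */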

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping);
int try_release_extent_mapping(struct extent_map_tree *map,
			       struct extent_io_tree *tree, struct page *page,
			       gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				GFP_NOFS);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}
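
/*
 * Typical pairing of the locking helpers above (illustrative only):
 *
 *	lock_extent(tree, start, end);
 *	// ... operate on the range [start, end] ...
 *	unlock_extent(tree, start, end);
 */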

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL,
				GFP_NOFS);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc);
int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
			      u64 start, u64 end, get_extent_t *get_extent,
			      int mode);
int extent_writepages(struct extent_io_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct extent_io_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		  __u64 start, __u64 len, get_extent_t *get_extent);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, int wait,
			     get_extent_t *get_extent, int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(start >> PAGE_SHIFT);
}
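
/*
 * Worked example, assuming 4KiB pages (illustrative only): a buffer at
 * start 0x1800 with len 0x2000 covers bytes 0x1800..0x37ff, i.e. pages
 * 1..3, so num_extent_pages(0x1800, 0x2000) == 4 - 1 == 3.
 */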

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len);
void read_extent_buffer(struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
			       unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
					 const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
int set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
			      unsigned long min_len, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  u64 delalloc_end, struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		gfp_t gfp_flags);
struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
		      u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_inode *inode, u64 start,
		     struct page *page, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the page is set up to date
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
				  u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
			   struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec);
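
/*
 * A sketch of how the read-repair pieces above fit together, loosely
 * modeled on the read retry paths; pg_offset, csum_pos, endio_func and
 * data are placeholders (illustrative only, error handling elided):
 *
 *	struct io_failure_record *failrec;
 *	struct bio *bio;
 *
 *	if (btrfs_get_io_failure_record(inode, start, end, &failrec))
 *		return -EIO;
 *	if (!btrfs_check_repairable(inode, failed_bio, failrec,
 *				    failed_mirror)) {
 *		free_io_failure(BTRFS_I(inode), failrec);
 *		return -EIO;	// no healthy mirror left to try
 *	}
 *	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
 *				      pg_offset, csum_pos, endio_func, data);
 *	// submit bio against failrec->this_mirror and retry the read
 */
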
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
noinline u64 find_lock_delalloc_range(struct inode *inode,
				      struct extent_io_tree *tree,
				      struct page *locked_page, u64 *start,
				      u64 *end, u64 max_bytes);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);
#endif