#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set *btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

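/*
 * Leak debugging: with CONFIG_BTRFS_DEBUG every allocated extent_state and
 * extent_buffer is linked onto a global list so that anything still alive
 * can be reported by btrfs_leak_debug_check() when the module exits.
 */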
#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(leak_lock);

static inline
void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_del(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_check(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
		       "refs %d\n",
		       eb->start, eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)	\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode;
	u64 isize;

	if (!tree->mapping)
		return;

	inode = tree->mapping->host;
	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
			"%s: ino %llu isize %llu odd range [%llu,%llu]",
			caller, btrfs_ino(inode), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(new, head)	do {} while (0)
#define btrfs_leak_debug_del(entry)	do {} while (0)
#define btrfs_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
	unsigned long bio_flags;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

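/*
 * Record a bit change in the (optional) changeset: account the number of
 * bytes affected and remember the range in the changeset's ulist.  Nothing
 * is recorded when the bits are already in the requested state.
 */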
static void add_extent_changeset(struct extent_state *state, unsigned bits,
				 struct extent_changeset *changeset,
				 int set)
{
	int ret;

	if (!changeset)
		return;
	if (set && (state->state & bits) == bits)
		return;
	if (!set && (state->state & bits) == 0)
		return;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	/* ENOMEM */
	BUG_ON(ret < 0);
}

static noinline void flush_write_bio(void *data);
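/*
 * Return the fs_info behind the tree's backing inode, or NULL when the tree
 * has no backing mapping.
 */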
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	if (!tree->mapping)
		return NULL;
	return btrfs_sb(tree->mapping->host->i_sb);
}

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;

	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
				     offsetof(struct btrfs_io_bio, bio));
	if (!btrfs_bioset)
		goto free_buffer_cache;

	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_free(btrfs_bioset);
	btrfs_bioset = NULL;

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	extent_state_cache = NULL;
	return -ENOMEM;
}

void extent_io_exit(void)
{
	btrfs_leak_debug_check();

	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
	if (btrfs_bioset)
		bioset_free(btrfs_bioset);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->mapping = mapping;
}

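/*
 * Allocate a new extent_state with a single reference.  The node starts out
 * detached from any tree, with no state bits and no private data.
 */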
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&state->leak_list, &states);
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

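/*
 * Insert 'node' into the rb-tree keyed by end offset.  'search_start' can be
 * used to begin the descent at a known nearby node, and a precomputed slot
 * (p_in/parent_in) from a previous search skips the walk entirely.  Returns
 * the existing node if the offset is already covered, NULL on success.
 */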
static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

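/*
 * Walk the tree looking for the entry that contains 'offset'.  On a miss,
 * NULL is returned and, when requested, prev_ret/next_ret are filled with
 * the neighbouring entries and p_ret/parent_ret with the insertion slot.
 */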
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

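/*
 * Search helpers: return the entry containing 'offset' or, when there is no
 * exact match, the first entry that ends beyond it.  The _for_insert variant
 * can also hand back the slot where a new entry would be linked.
 */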
static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
	if (!ret)
		return prev;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, unsigned *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned *bits,
			   struct extent_changeset *changeset);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			unsigned *bits, struct extent_changeset *changeset)
{
	struct rb_node *node;

	if (end < start)
		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
		       end, start);
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits, changeset);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
		       "%llu %llu\n",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

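/* Return the in-tree successor of 'state', or NULL if it is the last one. */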
static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    unsigned *bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	add_extent_changeset(state, bits_to_clear, changeset, 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

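/*
 * Make sure a preallocated extent_state exists; fall back to a GFP_ATOMIC
 * allocation when the caller could not preallocate one while sleeping was
 * still allowed.
 */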
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			      unsigned bits, int wake, int delete,
			      struct extent_state **cached_state,
			      gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake,
						changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;
}

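/*
 * Sleep until 'state' is woken up; the tree lock is dropped across the
 * schedule() and re-taken before returning, as the sparse annotations note.
 */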
static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned long bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

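/*
 * Set bits on a single extent_state: run the set_bit hook, account newly
 * dirtied bytes and record the change in the optional changeset.
 */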
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned *bits, struct extent_changeset *changeset)
{
	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	add_extent_changeset(state, bits_to_set, changeset, 1);
	state->state |= bits_to_set;
}

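/*
 * Remember 'state' in *cached_ptr (taking a reference) so later operations
 * on the same range can skip the tree search, but only when the state
 * carries one of the requested flags.
 */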
static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_IOBITS | EXTENT_BOUNDARY);
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 unsigned bits, unsigned exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;
}

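/* Set bits with no exclusive bits and without recording a changeset. */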
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 * failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask, NULL);
}


/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 * @mask:	the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	bool first_iteration = true;

	btrfs_debug_check_extent_io_range(tree, start, end);

again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state
		 * that matches exactly the target range, in which case no
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
		prealloc = alloc_extent_state(mask);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits, NULL);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, NULL);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0,
						NULL);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, NULL);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	first_iteration = false;
	goto again;
}

Chris Masond1310b22008-01-24 16:13:08 -05001287/* wrappers around set/clear extent bit */
Qu Wenruod38ed272015-10-12 14:53:37 +08001288int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1289 unsigned bits, gfp_t mask,
1290 struct extent_changeset *changeset)
1291{
1292 /*
1293 * We don't support EXTENT_LOCKED yet, as current changeset will
1294 * record any bits changed, so for EXTENT_LOCKED case, it will
1295 * either fail with -EEXIST or changeset will record the whole
1296 * range.
1297 */
1298 BUG_ON(bits & EXTENT_LOCKED);
1299
1300 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
1301 changeset);
1302}
1303
Qu Wenruofefdc552015-10-12 15:35:38 +08001304int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1305 unsigned bits, int wake, int delete,
1306 struct extent_state **cached, gfp_t mask)
1307{
1308 return __clear_extent_bit(tree, start, end, bits, wake, delete,
1309 cached, mask, NULL);
1310}
1311
Qu Wenruofefdc552015-10-12 15:35:38 +08001312int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1313 unsigned bits, gfp_t mask,
1314 struct extent_changeset *changeset)
1315{
1316 /*
1317 * Don't support EXTENT_LOCKED case, same reason as
1318 * set_record_extent_bits().
1319 */
1320 BUG_ON(bits & EXTENT_LOCKED);
1321
1322 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
1323 changeset);
1324}
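
/*
 * Note on the *_record_* variants above: they behave like the plain
 * set/clear helpers but additionally fill the caller-supplied
 * extent_changeset with the ranges whose bits actually changed, so the
 * caller can account exactly how many bytes were affected.  A rough
 * sketch of a caller (illustrative only, not taken from this file):
 *
 *	ret = set_record_extent_bits(tree, start, end, bits, GFP_NOFS,
 *				     &changeset);
 *	if (!ret)
 *		... walk the recorded ranges and account them ...
 */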
1325
Chris Masond352ac62008-09-29 15:18:18 -04001326/*
 1327 * either insert or lock the state struct between start and end;
 1328 * blocks until the whole range has been locked.
1329 */
Chris Mason1edbb732009-09-02 13:24:36 -04001330int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
David Sterbaff13db42015-12-03 14:30:40 +01001331 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001332{
1333 int err;
1334 u64 failed_start;
David Sterba9ee49a042015-01-14 19:52:13 +01001335
Chris Masond1310b22008-01-24 16:13:08 -05001336 while (1) {
David Sterbaff13db42015-12-03 14:30:40 +01001337 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001338 EXTENT_LOCKED, &failed_start,
Qu Wenruod38ed272015-10-12 14:53:37 +08001339 cached_state, GFP_NOFS, NULL);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001340 if (err == -EEXIST) {
Chris Masond1310b22008-01-24 16:13:08 -05001341 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1342 start = failed_start;
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001343 } else
Chris Masond1310b22008-01-24 16:13:08 -05001344 break;
Chris Masond1310b22008-01-24 16:13:08 -05001345 WARN_ON(start > end);
1346 }
1347 return err;
1348}
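
/*
 * A minimal usage sketch for the locking helper above (assuming the
 * caller holds no other lock on this range):
 *
 *	struct extent_state *cached_state = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached_state);
 *	... read or modify the locked range ...
 *	unlock_extent_cached(tree, start, end, &cached_state, GFP_NOFS);
 *
 * unlock_extent_cached() is the pairing used elsewhere in this file,
 * e.g. by find_lock_delalloc_range().
 */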
Chris Masond1310b22008-01-24 16:13:08 -05001349
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001350int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
Josef Bacik25179202008-10-29 14:49:05 -04001351{
1352 int err;
1353 u64 failed_start;
1354
Jeff Mahoney3fbe5c02012-03-01 14:57:19 +01001355 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
Qu Wenruod38ed272015-10-12 14:53:37 +08001356 &failed_start, NULL, GFP_NOFS, NULL);
Yan Zheng66435582008-10-30 14:19:50 -04001357 if (err == -EEXIST) {
1358 if (failed_start > start)
1359 clear_extent_bit(tree, start, failed_start - 1,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001360 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
Josef Bacik25179202008-10-29 14:49:05 -04001361 return 0;
Yan Zheng66435582008-10-30 14:19:50 -04001362 }
Josef Bacik25179202008-10-29 14:49:05 -04001363 return 1;
1364}
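
/*
 * try_lock_extent() above is the non-blocking variant: it returns 1 if
 * the whole range was locked, and 0 if part of it was already locked by
 * someone else (anything it did manage to lock is cleared again).  A
 * hypothetical caller might look like:
 *
 *	if (!try_lock_extent(tree, start, end))
 *		return -EAGAIN;		(back off and retry later)
 */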
Josef Bacik25179202008-10-29 14:49:05 -04001365
David Sterbabd1fa4f2015-12-03 13:08:59 +01001366void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001367{
1368 unsigned long index = start >> PAGE_CACHE_SHIFT;
1369 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1370 struct page *page;
1371
1372 while (index <= end_index) {
1373 page = find_get_page(inode->i_mapping, index);
1374 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1375 clear_page_dirty_for_io(page);
1376 page_cache_release(page);
1377 index++;
1378 }
Chris Mason4adaa612013-03-26 13:07:00 -04001379}
1380
David Sterbaf6311572015-12-03 13:08:59 +01001381void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
Chris Mason4adaa612013-03-26 13:07:00 -04001382{
1383 unsigned long index = start >> PAGE_CACHE_SHIFT;
1384 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1385 struct page *page;
1386
1387 while (index <= end_index) {
1388 page = find_get_page(inode->i_mapping, index);
1389 BUG_ON(!page); /* Pages should be in the extent_io_tree */
Chris Mason4adaa612013-03-26 13:07:00 -04001390 __set_page_dirty_nobuffers(page);
Konstantin Khebnikov8d386332015-02-11 15:26:55 -08001391 account_page_redirty(page);
Chris Mason4adaa612013-03-26 13:07:00 -04001392 page_cache_release(page);
1393 index++;
1394 }
Chris Mason4adaa612013-03-26 13:07:00 -04001395}
1396
Chris Masond1310b22008-01-24 16:13:08 -05001397/*
Chris Masond1310b22008-01-24 16:13:08 -05001398 * helper function to set both pages and extents in the tree writeback
1399 */
David Sterba35de6db2015-12-03 13:08:59 +01001400static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
Chris Masond1310b22008-01-24 16:13:08 -05001401{
1402 unsigned long index = start >> PAGE_CACHE_SHIFT;
1403 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1404 struct page *page;
1405
1406 while (index <= end_index) {
1407 page = find_get_page(tree->mapping, index);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001408 BUG_ON(!page); /* Pages should be in the extent_io_tree */
Chris Masond1310b22008-01-24 16:13:08 -05001409 set_page_writeback(page);
1410 page_cache_release(page);
1411 index++;
1412 }
Chris Masond1310b22008-01-24 16:13:08 -05001413}
Chris Masond1310b22008-01-24 16:13:08 -05001414
Chris Masond352ac62008-09-29 15:18:18 -04001415/* find the first state struct with 'bits' set after 'start', and
 1416 * return it. tree->lock must be held. NULL will be returned if
1417 * nothing was found after 'start'
1418 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00001419static struct extent_state *
1420find_first_extent_bit_state(struct extent_io_tree *tree,
David Sterba9ee49a042015-01-14 19:52:13 +01001421 u64 start, unsigned bits)
Chris Masond7fc6402008-02-18 12:12:38 -05001422{
1423 struct rb_node *node;
1424 struct extent_state *state;
1425
1426 /*
1427 * this search will find all the extents that end after
1428 * our range starts.
1429 */
1430 node = tree_search(tree, start);
Chris Masond3977122009-01-05 21:25:51 -05001431 if (!node)
Chris Masond7fc6402008-02-18 12:12:38 -05001432 goto out;
Chris Masond7fc6402008-02-18 12:12:38 -05001433
Chris Masond3977122009-01-05 21:25:51 -05001434 while (1) {
Chris Masond7fc6402008-02-18 12:12:38 -05001435 state = rb_entry(node, struct extent_state, rb_node);
Chris Masond3977122009-01-05 21:25:51 -05001436 if (state->end >= start && (state->state & bits))
Chris Masond7fc6402008-02-18 12:12:38 -05001437 return state;
Chris Masond3977122009-01-05 21:25:51 -05001438
Chris Masond7fc6402008-02-18 12:12:38 -05001439 node = rb_next(node);
1440 if (!node)
1441 break;
1442 }
1443out:
1444 return NULL;
1445}
Chris Masond7fc6402008-02-18 12:12:38 -05001446
Chris Masond352ac62008-09-29 15:18:18 -04001447/*
Xiao Guangrong69261c42011-07-14 03:19:45 +00001448 * find the first offset in the io tree with 'bits' set. zero is
1449 * returned if we find something, and *start_ret and *end_ret are
1450 * set to reflect the state struct that was found.
1451 *
Wang Sheng-Hui477d7ea2012-04-06 14:35:47 +08001452 * If nothing was found, 1 is returned. If something was found, 0 is returned.
Xiao Guangrong69261c42011-07-14 03:19:45 +00001453 */
1454int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
David Sterba9ee49a042015-01-14 19:52:13 +01001455 u64 *start_ret, u64 *end_ret, unsigned bits,
Josef Bacike6138872012-09-27 17:07:30 -04001456 struct extent_state **cached_state)
Xiao Guangrong69261c42011-07-14 03:19:45 +00001457{
1458 struct extent_state *state;
Josef Bacike6138872012-09-27 17:07:30 -04001459 struct rb_node *n;
Xiao Guangrong69261c42011-07-14 03:19:45 +00001460 int ret = 1;
1461
1462 spin_lock(&tree->lock);
Josef Bacike6138872012-09-27 17:07:30 -04001463 if (cached_state && *cached_state) {
1464 state = *cached_state;
Filipe Manana27a35072014-07-06 20:09:59 +01001465 if (state->end == start - 1 && extent_state_in_tree(state)) {
Josef Bacike6138872012-09-27 17:07:30 -04001466 n = rb_next(&state->rb_node);
1467 while (n) {
1468 state = rb_entry(n, struct extent_state,
1469 rb_node);
1470 if (state->state & bits)
1471 goto got_it;
1472 n = rb_next(n);
1473 }
1474 free_extent_state(*cached_state);
1475 *cached_state = NULL;
1476 goto out;
1477 }
1478 free_extent_state(*cached_state);
1479 *cached_state = NULL;
1480 }
1481
Xiao Guangrong69261c42011-07-14 03:19:45 +00001482 state = find_first_extent_bit_state(tree, start, bits);
Josef Bacike6138872012-09-27 17:07:30 -04001483got_it:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001484 if (state) {
Filipe Mananae38e2ed2014-10-13 12:28:38 +01001485 cache_state_if_flags(state, cached_state, 0);
Xiao Guangrong69261c42011-07-14 03:19:45 +00001486 *start_ret = state->start;
1487 *end_ret = state->end;
1488 ret = 0;
1489 }
Josef Bacike6138872012-09-27 17:07:30 -04001490out:
Xiao Guangrong69261c42011-07-14 03:19:45 +00001491 spin_unlock(&tree->lock);
1492 return ret;
1493}
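
/*
 * Sketch of how a caller can walk all ranges with a given bit set using
 * find_first_extent_bit() (illustrative only):
 *
 *	u64 found_start, found_end;
 *	u64 cur = 0;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... handle [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */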
1494
1495/*
Chris Masond352ac62008-09-29 15:18:18 -04001496 * find a contiguous range of bytes in the file marked as delalloc, not
1497 * more than 'max_bytes'. start and end are used to return the range,
1498 *
1499 * 1 is returned if we find something, 0 if nothing was in the tree
1500 */
Chris Masonc8b97812008-10-29 14:49:59 -04001501static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001502 u64 *start, u64 *end, u64 max_bytes,
1503 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001504{
1505 struct rb_node *node;
1506 struct extent_state *state;
1507 u64 cur_start = *start;
1508 u64 found = 0;
1509 u64 total_bytes = 0;
1510
Chris Masoncad321a2008-12-17 14:51:42 -05001511 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001512
Chris Masond1310b22008-01-24 16:13:08 -05001513 /*
1514 * this search will find all the extents that end after
1515 * our range starts.
1516 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001517 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001518 if (!node) {
Chris Mason3b951512008-04-17 11:29:12 -04001519 if (!found)
1520 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001521 goto out;
1522 }
1523
Chris Masond3977122009-01-05 21:25:51 -05001524 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001525 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001526 if (found && (state->start != cur_start ||
1527 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001528 goto out;
1529 }
1530 if (!(state->state & EXTENT_DELALLOC)) {
1531 if (!found)
1532 *end = state->end;
1533 goto out;
1534 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001535 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001536 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001537 *cached_state = state;
1538 atomic_inc(&state->refs);
1539 }
Chris Masond1310b22008-01-24 16:13:08 -05001540 found++;
1541 *end = state->end;
1542 cur_start = state->end + 1;
1543 node = rb_next(node);
Chris Masond1310b22008-01-24 16:13:08 -05001544 total_bytes += state->end - state->start + 1;
Josef Bacik7bf811a52013-10-07 22:11:09 -04001545 if (total_bytes >= max_bytes)
Josef Bacik573aeca2013-08-30 14:38:49 -04001546 break;
Josef Bacik573aeca2013-08-30 14:38:49 -04001547 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001548 break;
1549 }
1550out:
Chris Masoncad321a2008-12-17 14:51:42 -05001551 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001552 return found;
1553}
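
/*
 * Note for callers of find_delalloc_range(): when something is found,
 * *cached_state points to the first delalloc extent_state and a
 * reference has been taken on it (see the atomic_inc above), so the
 * caller must drop it with free_extent_state() when done, as
 * find_lock_delalloc_range() below does.
 */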
1554
Jeff Mahoney143bede2012-03-01 14:56:26 +01001555static noinline void __unlock_for_delalloc(struct inode *inode,
1556 struct page *locked_page,
1557 u64 start, u64 end)
Chris Masonc8b97812008-10-29 14:49:59 -04001558{
1559 int ret;
1560 struct page *pages[16];
1561 unsigned long index = start >> PAGE_CACHE_SHIFT;
1562 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1563 unsigned long nr_pages = end_index - index + 1;
1564 int i;
1565
1566 if (index == locked_page->index && end_index == index)
Jeff Mahoney143bede2012-03-01 14:56:26 +01001567 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001568
Chris Masond3977122009-01-05 21:25:51 -05001569 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001570 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001571 min_t(unsigned long, nr_pages,
1572 ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001573 for (i = 0; i < ret; i++) {
1574 if (pages[i] != locked_page)
1575 unlock_page(pages[i]);
1576 page_cache_release(pages[i]);
1577 }
1578 nr_pages -= ret;
1579 index += ret;
1580 cond_resched();
1581 }
Chris Masonc8b97812008-10-29 14:49:59 -04001582}
1583
1584static noinline int lock_delalloc_pages(struct inode *inode,
1585 struct page *locked_page,
1586 u64 delalloc_start,
1587 u64 delalloc_end)
1588{
1589 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1590 unsigned long start_index = index;
1591 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1592 unsigned long pages_locked = 0;
1593 struct page *pages[16];
1594 unsigned long nrpages;
1595 int ret;
1596 int i;
1597
1598 /* the caller is responsible for locking the start index */
1599 if (index == locked_page->index && index == end_index)
1600 return 0;
1601
1602 /* skip the page at the start index */
1603 nrpages = end_index - index + 1;
Chris Masond3977122009-01-05 21:25:51 -05001604 while (nrpages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001605 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001606 min_t(unsigned long,
1607 nrpages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001608 if (ret == 0) {
1609 ret = -EAGAIN;
1610 goto done;
1611 }
1612 /* now we have an array of pages, lock them all */
1613 for (i = 0; i < ret; i++) {
1614 /*
1615 * the caller is taking responsibility for
1616 * locked_page
1617 */
Chris Mason771ed682008-11-06 22:02:51 -05001618 if (pages[i] != locked_page) {
Chris Masonc8b97812008-10-29 14:49:59 -04001619 lock_page(pages[i]);
Chris Masonf2b1c412008-11-10 07:31:30 -05001620 if (!PageDirty(pages[i]) ||
1621 pages[i]->mapping != inode->i_mapping) {
Chris Mason771ed682008-11-06 22:02:51 -05001622 ret = -EAGAIN;
1623 unlock_page(pages[i]);
1624 page_cache_release(pages[i]);
1625 goto done;
1626 }
1627 }
Chris Masonc8b97812008-10-29 14:49:59 -04001628 page_cache_release(pages[i]);
Chris Mason771ed682008-11-06 22:02:51 -05001629 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001630 }
Chris Masonc8b97812008-10-29 14:49:59 -04001631 nrpages -= ret;
1632 index += ret;
1633 cond_resched();
1634 }
1635 ret = 0;
1636done:
1637 if (ret && pages_locked) {
1638 __unlock_for_delalloc(inode, locked_page,
1639 delalloc_start,
1640 ((u64)(start_index + pages_locked - 1)) <<
1641 PAGE_CACHE_SHIFT);
1642 }
1643 return ret;
1644}
1645
1646/*
1647 * find a contiguous range of bytes in the file marked as delalloc, not
 1648 * more than 'max_bytes'. start and end are used to return the range.
1649 *
1650 * 1 is returned if we find something, 0 if nothing was in the tree
1651 */
Josef Bacik294e30f2013-10-09 12:00:56 -04001652STATIC u64 find_lock_delalloc_range(struct inode *inode,
1653 struct extent_io_tree *tree,
1654 struct page *locked_page, u64 *start,
1655 u64 *end, u64 max_bytes)
Chris Masonc8b97812008-10-29 14:49:59 -04001656{
1657 u64 delalloc_start;
1658 u64 delalloc_end;
1659 u64 found;
Chris Mason9655d292009-09-02 15:22:30 -04001660 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001661 int ret;
1662 int loops = 0;
1663
1664again:
1665 /* step one, find a bunch of delalloc bytes starting at start */
1666 delalloc_start = *start;
1667 delalloc_end = 0;
1668 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001669 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001670 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001671 *start = delalloc_start;
1672 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001673 free_extent_state(cached_state);
Liu Bo385fe0b2013-10-01 23:49:49 +08001674 return 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001675 }
1676
1677 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001678 * start comes from the offset of locked_page. We have to lock
1679 * pages in order, so we can't process delalloc bytes before
1680 * locked_page
1681 */
Chris Masond3977122009-01-05 21:25:51 -05001682 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001683 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001684
1685 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001686 * make sure to limit the number of pages we try to lock down
Chris Masonc8b97812008-10-29 14:49:59 -04001687 */
Josef Bacik7bf811a52013-10-07 22:11:09 -04001688 if (delalloc_end + 1 - delalloc_start > max_bytes)
1689 delalloc_end = delalloc_start + max_bytes - 1;
Chris Masond3977122009-01-05 21:25:51 -05001690
Chris Masonc8b97812008-10-29 14:49:59 -04001691 /* step two, lock all the pages after the page that has start */
1692 ret = lock_delalloc_pages(inode, locked_page,
1693 delalloc_start, delalloc_end);
1694 if (ret == -EAGAIN) {
 1695		/* some of the pages are gone, let's avoid looping by
1696 * shortening the size of the delalloc range we're searching
1697 */
Chris Mason9655d292009-09-02 15:22:30 -04001698 free_extent_state(cached_state);
Chris Mason7d788742014-05-21 05:49:54 -07001699 cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001700 if (!loops) {
Josef Bacik7bf811a52013-10-07 22:11:09 -04001701 max_bytes = PAGE_CACHE_SIZE;
Chris Masonc8b97812008-10-29 14:49:59 -04001702 loops = 1;
1703 goto again;
1704 } else {
1705 found = 0;
1706 goto out_failed;
1707 }
1708 }
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001709 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
Chris Masonc8b97812008-10-29 14:49:59 -04001710
1711 /* step three, lock the state bits for the whole range */
David Sterbaff13db42015-12-03 14:30:40 +01001712 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001713
1714 /* then test to make sure it is all still delalloc */
1715 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001716 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001717 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001718 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1719 &cached_state, GFP_NOFS);
Chris Masonc8b97812008-10-29 14:49:59 -04001720 __unlock_for_delalloc(inode, locked_page,
1721 delalloc_start, delalloc_end);
1722 cond_resched();
1723 goto again;
1724 }
Chris Mason9655d292009-09-02 15:22:30 -04001725 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001726 *start = delalloc_start;
1727 *end = delalloc_end;
1728out_failed:
1729 return found;
1730}
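
/*
 * On a successful return from find_lock_delalloc_range() (non-zero
 * 'found'), the pages covering [*start, *end] are locked, the range is
 * locked in the io tree and has been re-verified to still be delalloc;
 * the caller is expected to process the range and then unlock both the
 * pages and the extent range itself.
 */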
1731
David Sterbaa9d93e12015-12-03 13:08:59 +01001732void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
Josef Bacikc2790a22013-07-29 11:20:47 -04001733 struct page *locked_page,
David Sterba9ee49a042015-01-14 19:52:13 +01001734 unsigned clear_bits,
Josef Bacikc2790a22013-07-29 11:20:47 -04001735 unsigned long page_ops)
Chris Masonc8b97812008-10-29 14:49:59 -04001736{
Josef Bacikc2790a22013-07-29 11:20:47 -04001737 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
Chris Masonc8b97812008-10-29 14:49:59 -04001738 int ret;
1739 struct page *pages[16];
1740 unsigned long index = start >> PAGE_CACHE_SHIFT;
1741 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1742 unsigned long nr_pages = end_index - index + 1;
1743 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001744
Chris Mason2c64c532009-09-02 15:04:12 -04001745 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
Josef Bacikc2790a22013-07-29 11:20:47 -04001746 if (page_ops == 0)
David Sterbaa9d93e12015-12-03 13:08:59 +01001747 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001748
Filipe Manana704de492014-10-06 22:14:22 +01001749 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1750 mapping_set_error(inode->i_mapping, -EIO);
1751
Chris Masond3977122009-01-05 21:25:51 -05001752 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001753 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001754 min_t(unsigned long,
1755 nr_pages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001756 for (i = 0; i < ret; i++) {
Chris Mason8b62b722009-09-02 16:53:46 -04001757
Josef Bacikc2790a22013-07-29 11:20:47 -04001758 if (page_ops & PAGE_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001759 SetPagePrivate2(pages[i]);
1760
Chris Masonc8b97812008-10-29 14:49:59 -04001761 if (pages[i] == locked_page) {
1762 page_cache_release(pages[i]);
1763 continue;
1764 }
Josef Bacikc2790a22013-07-29 11:20:47 -04001765 if (page_ops & PAGE_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001766 clear_page_dirty_for_io(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001767 if (page_ops & PAGE_SET_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001768 set_page_writeback(pages[i]);
Filipe Manana704de492014-10-06 22:14:22 +01001769 if (page_ops & PAGE_SET_ERROR)
1770 SetPageError(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001771 if (page_ops & PAGE_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001772 end_page_writeback(pages[i]);
Josef Bacikc2790a22013-07-29 11:20:47 -04001773 if (page_ops & PAGE_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001774 unlock_page(pages[i]);
Chris Masonc8b97812008-10-29 14:49:59 -04001775 page_cache_release(pages[i]);
1776 }
1777 nr_pages -= ret;
1778 index += ret;
1779 cond_resched();
1780 }
Chris Masonc8b97812008-10-29 14:49:59 -04001781}
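
/*
 * extent_clear_unlock_delalloc() above is driven by two masks: the
 * extent tree bits to clear ('clear_bits') and the per-page actions to
 * apply ('page_ops').  A hedged example of how a caller might release a
 * fully processed delalloc range (the flag combination is illustrative):
 *
 *	extent_clear_unlock_delalloc(inode, start, end, locked_page,
 *				     EXTENT_LOCKED | EXTENT_DELALLOC,
 *				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
 *				     PAGE_SET_WRITEBACK);
 */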
Chris Masonc8b97812008-10-29 14:49:59 -04001782
Chris Masond352ac62008-09-29 15:18:18 -04001783/*
1784 * count the number of bytes in the tree that have a given bit(s)
1785 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1786 * cached. The total number found is returned.
1787 */
Chris Masond1310b22008-01-24 16:13:08 -05001788u64 count_range_bits(struct extent_io_tree *tree,
1789 u64 *start, u64 search_end, u64 max_bytes,
David Sterba9ee49a042015-01-14 19:52:13 +01001790 unsigned bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05001791{
1792 struct rb_node *node;
1793 struct extent_state *state;
1794 u64 cur_start = *start;
1795 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05001796 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001797 int found = 0;
1798
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05301799 if (WARN_ON(search_end <= cur_start))
Chris Masond1310b22008-01-24 16:13:08 -05001800 return 0;
Chris Masond1310b22008-01-24 16:13:08 -05001801
Chris Masoncad321a2008-12-17 14:51:42 -05001802 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001803 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1804 total_bytes = tree->dirty_bytes;
1805 goto out;
1806 }
1807 /*
1808 * this search will find all the extents that end after
1809 * our range starts.
1810 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001811 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05001812 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001813 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05001814
Chris Masond3977122009-01-05 21:25:51 -05001815 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001816 state = rb_entry(node, struct extent_state, rb_node);
1817 if (state->start > search_end)
1818 break;
Chris Masonec29ed52011-02-23 16:23:20 -05001819 if (contig && found && state->start > last + 1)
1820 break;
1821 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001822 total_bytes += min(search_end, state->end) + 1 -
1823 max(cur_start, state->start);
1824 if (total_bytes >= max_bytes)
1825 break;
1826 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04001827 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05001828 found = 1;
1829 }
Chris Masonec29ed52011-02-23 16:23:20 -05001830 last = state->end;
1831 } else if (contig && found) {
1832 break;
Chris Masond1310b22008-01-24 16:13:08 -05001833 }
1834 node = rb_next(node);
1835 if (!node)
1836 break;
1837 }
1838out:
Chris Masoncad321a2008-12-17 14:51:42 -05001839 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001840 return total_bytes;
1841}
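
/*
 * A short usage sketch for count_range_bits() (values illustrative):
 *
 *	u64 range_start = 0;
 *	u64 bytes;
 *
 *	bytes = count_range_bits(tree, &range_start, (u64)-1, (u64)-1,
 *				 EXTENT_DELALLOC, 0);
 *
 * 'range_start' is advanced to the start of the first matching extent;
 * with 'contig' set, counting stops at the first gap between extents.
 */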
Christoph Hellwigb2950862008-12-02 09:54:17 -05001842
Chris Masond352ac62008-09-29 15:18:18 -04001843/*
1844 * set the private field for a given byte offset in the tree. If there isn't
 1845 * an extent_state there already, -ENOENT is returned.
1846 */
Sergei Trofimovich171170c2013-08-14 23:27:46 +03001847static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
Chris Masond1310b22008-01-24 16:13:08 -05001848{
1849 struct rb_node *node;
1850 struct extent_state *state;
1851 int ret = 0;
1852
Chris Masoncad321a2008-12-17 14:51:42 -05001853 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001854 /*
1855 * this search will find all the extents that end after
1856 * our range starts.
1857 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001858 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001859 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001860 ret = -ENOENT;
1861 goto out;
1862 }
1863 state = rb_entry(node, struct extent_state, rb_node);
1864 if (state->start != start) {
1865 ret = -ENOENT;
1866 goto out;
1867 }
1868 state->private = private;
1869out:
Chris Masoncad321a2008-12-17 14:51:42 -05001870 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001871 return ret;
1872}
1873
1874int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1875{
1876 struct rb_node *node;
1877 struct extent_state *state;
1878 int ret = 0;
1879
Chris Masoncad321a2008-12-17 14:51:42 -05001880 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001881 /*
1882 * this search will find all the extents that end after
1883 * our range starts.
1884 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001885 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001886 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001887 ret = -ENOENT;
1888 goto out;
1889 }
1890 state = rb_entry(node, struct extent_state, rb_node);
1891 if (state->start != start) {
1892 ret = -ENOENT;
1893 goto out;
1894 }
1895 *private = state->private;
1896out:
Chris Masoncad321a2008-12-17 14:51:42 -05001897 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001898 return ret;
1899}
1900
1901/*
 1902 * searches a range in the state tree for the given bits.
Chris Mason70dec802008-01-29 09:59:12 -05001903 * If 'filled' == 1, this returns 1 only if every extent covering the
Chris Masond1310b22008-01-24 16:13:08 -05001904 * range has the bits set. Otherwise, 1 is returned if any bit in the
1905 * range is found set.
1906 */
1907int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
David Sterba9ee49a042015-01-14 19:52:13 +01001908 unsigned bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05001909{
1910 struct extent_state *state = NULL;
1911 struct rb_node *node;
1912 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001913
Chris Masoncad321a2008-12-17 14:51:42 -05001914 spin_lock(&tree->lock);
Filipe Manana27a35072014-07-06 20:09:59 +01001915 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
Josef Bacikdf98b6e2011-06-20 14:53:48 -04001916 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04001917 node = &cached->rb_node;
1918 else
1919 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05001920 while (node && start <= end) {
1921 state = rb_entry(node, struct extent_state, rb_node);
1922
1923 if (filled && state->start > start) {
1924 bitset = 0;
1925 break;
1926 }
1927
1928 if (state->start > end)
1929 break;
1930
1931 if (state->state & bits) {
1932 bitset = 1;
1933 if (!filled)
1934 break;
1935 } else if (filled) {
1936 bitset = 0;
1937 break;
1938 }
Chris Mason46562ce2009-09-23 20:23:16 -04001939
1940 if (state->end == (u64)-1)
1941 break;
1942
Chris Masond1310b22008-01-24 16:13:08 -05001943 start = state->end + 1;
1944 if (start > end)
1945 break;
1946 node = rb_next(node);
1947 if (!node) {
1948 if (filled)
1949 bitset = 0;
1950 break;
1951 }
1952 }
Chris Masoncad321a2008-12-17 14:51:42 -05001953 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001954 return bitset;
1955}
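
/*
 * The two 'filled' modes of test_range_bit(), in short:
 *
 *  - filled == 1: the whole [start, end] range must be covered by
 *    extents carrying the bits, e.g. the uptodate check in
 *    check_page_uptodate() below;
 *  - filled == 0: a single extent with any of the bits set anywhere in
 *    the range is enough for a return value of 1.
 */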
Chris Masond1310b22008-01-24 16:13:08 -05001956
1957/*
1958 * helper function to set a given page up to date if all the
1959 * extents in the tree for that page are up to date
1960 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01001961static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05001962{
Miao Xie4eee4fa2012-12-21 09:17:45 +00001963 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05001964 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001965 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001966 SetPageUptodate(page);
Chris Masond1310b22008-01-24 16:13:08 -05001967}
1968
Miao Xie8b110e32014-09-12 18:44:03 +08001969int free_io_failure(struct inode *inode, struct io_failure_record *rec)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001970{
1971 int ret;
1972 int err = 0;
1973 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1974
1975 set_state_private(failure_tree, rec->start, 0);
1976 ret = clear_extent_bits(failure_tree, rec->start,
1977 rec->start + rec->len - 1,
1978 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1979 if (ret)
1980 err = ret;
1981
David Woodhouse53b381b2013-01-29 18:40:14 -05001982 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1983 rec->start + rec->len - 1,
1984 EXTENT_DAMAGED, GFP_NOFS);
1985 if (ret && !err)
1986 err = ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001987
1988 kfree(rec);
1989 return err;
1990}
1991
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001992/*
1993 * this bypasses the standard btrfs submit functions deliberately, as
1994 * the standard behavior is to write all copies in a raid setup. here we only
1995 * want to write the one bad copy. so we do the mapping for ourselves and issue
1996 * submit_bio directly.
Stefan Behrens3ec706c2012-11-05 15:46:42 +01001997 * to avoid any synchronization issues, wait for the data after writing, which
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001998 * actually prevents the read that triggered the error from finishing.
1999 * currently, there can be no more than two copies of every data bit. thus,
2000 * exactly one rewrite is required.
2001 */
Miao Xie1203b682014-09-12 18:44:01 +08002002int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2003 struct page *page, unsigned int pg_offset, int mirror_num)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002004{
Miao Xie1203b682014-09-12 18:44:01 +08002005 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002006 struct bio *bio;
2007 struct btrfs_device *dev;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002008 u64 map_length = 0;
2009 u64 sector;
2010 struct btrfs_bio *bbio = NULL;
David Woodhouse53b381b2013-01-29 18:40:14 -05002011 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002012 int ret;
2013
Ilya Dryomov908960c2013-11-03 19:06:39 +02002014 ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002015 BUG_ON(!mirror_num);
2016
David Woodhouse53b381b2013-01-29 18:40:14 -05002017 /* we can't repair anything in raid56 yet */
2018 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2019 return 0;
2020
Chris Mason9be33952013-05-17 18:30:14 -04002021 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002022 if (!bio)
2023 return -EIO;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002024 bio->bi_iter.bi_size = 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002025 map_length = length;
2026
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002027 ret = btrfs_map_block(fs_info, WRITE, logical,
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002028 &map_length, &bbio, mirror_num);
2029 if (ret) {
2030 bio_put(bio);
2031 return -EIO;
2032 }
2033 BUG_ON(mirror_num != bbio->mirror_num);
2034 sector = bbio->stripes[mirror_num-1].physical >> 9;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002035 bio->bi_iter.bi_sector = sector;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002036 dev = bbio->stripes[mirror_num-1].dev;
Zhao Lei6e9606d2015-01-20 15:11:34 +08002037 btrfs_put_bbio(bbio);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002038 if (!dev || !dev->bdev || !dev->writeable) {
2039 bio_put(bio);
2040 return -EIO;
2041 }
2042 bio->bi_bdev = dev->bdev;
Miao Xieffdd2012014-09-12 18:44:00 +08002043 bio_add_page(bio, page, length, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002044
Kent Overstreet33879d42013-11-23 22:33:32 -08002045 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002046 /* try to remap that extent elsewhere? */
2047 bio_put(bio);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002048 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002049 return -EIO;
2050 }
2051
David Sterbab14af3b2015-10-08 10:43:10 +02002052 btrfs_info_rl_in_rcu(fs_info,
2053 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
Miao Xie1203b682014-09-12 18:44:01 +08002054 btrfs_ino(inode), start,
2055 rcu_str_deref(dev->name), sector);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002056 bio_put(bio);
2057 return 0;
2058}
2059
Josef Bacikea466792012-03-26 21:57:36 -04002060int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2061 int mirror_num)
2062{
Josef Bacikea466792012-03-26 21:57:36 -04002063 u64 start = eb->start;
2064 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
Chris Masond95603b2012-04-12 15:55:15 -04002065 int ret = 0;
Josef Bacikea466792012-03-26 21:57:36 -04002066
Ilya Dryomov908960c2013-11-03 19:06:39 +02002067 if (root->fs_info->sb->s_flags & MS_RDONLY)
2068 return -EROFS;
2069
Josef Bacikea466792012-03-26 21:57:36 -04002070 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02002071 struct page *p = eb->pages[i];
Miao Xie1203b682014-09-12 18:44:01 +08002072
2073 ret = repair_io_failure(root->fs_info->btree_inode, start,
2074 PAGE_CACHE_SIZE, start, p,
2075 start - page_offset(p), mirror_num);
Josef Bacikea466792012-03-26 21:57:36 -04002076 if (ret)
2077 break;
2078 start += PAGE_CACHE_SIZE;
2079 }
2080
2081 return ret;
2082}
2083
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002084/*
2085 * each time an IO finishes, we do a fast check in the IO failure tree
2086 * to see if we need to process or clean up an io_failure_record
2087 */
Miao Xie8b110e32014-09-12 18:44:03 +08002088int clean_io_failure(struct inode *inode, u64 start, struct page *page,
2089 unsigned int pg_offset)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002090{
2091 u64 private;
2092 u64 private_failure;
2093 struct io_failure_record *failrec;
Ilya Dryomov908960c2013-11-03 19:06:39 +02002094 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002095 struct extent_state *state;
2096 int num_copies;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002097 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002098
2099 private = 0;
2100 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2101 (u64)-1, 1, EXTENT_DIRTY, 0);
2102 if (!ret)
2103 return 0;
2104
2105 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2106 &private_failure);
2107 if (ret)
2108 return 0;
2109
2110 failrec = (struct io_failure_record *)(unsigned long) private_failure;
2111 BUG_ON(!failrec->this_mirror);
2112
2113 if (failrec->in_validation) {
2114 /* there was no real error, just free the record */
2115 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2116 failrec->start);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002117 goto out;
2118 }
Ilya Dryomov908960c2013-11-03 19:06:39 +02002119 if (fs_info->sb->s_flags & MS_RDONLY)
2120 goto out;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002121
2122 spin_lock(&BTRFS_I(inode)->io_tree.lock);
2123 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2124 failrec->start,
2125 EXTENT_LOCKED);
2126 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2127
Miao Xie883d0de2013-07-25 19:22:35 +08002128 if (state && state->start <= failrec->start &&
2129 state->end >= failrec->start + failrec->len - 1) {
Stefan Behrens3ec706c2012-11-05 15:46:42 +01002130 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2131 failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002132 if (num_copies > 1) {
Miao Xie1203b682014-09-12 18:44:01 +08002133 repair_io_failure(inode, start, failrec->len,
Miao Xie454ff3d2014-09-12 18:43:58 +08002134 failrec->logical, page,
Miao Xie1203b682014-09-12 18:44:01 +08002135 pg_offset, failrec->failed_mirror);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002136 }
2137 }
2138
2139out:
Miao Xie454ff3d2014-09-12 18:43:58 +08002140 free_io_failure(inode, failrec);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002141
Miao Xie454ff3d2014-09-12 18:43:58 +08002142 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002143}
2144
Miao Xief6124962014-09-12 18:44:04 +08002145/*
2146 * Can be called when
 2147 * - holding the extent lock
 2148 * - under an ordered extent
 2149 * - the inode is being freed
2150 */
2151void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
2152{
2153 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2154 struct io_failure_record *failrec;
2155 struct extent_state *state, *next;
2156
2157 if (RB_EMPTY_ROOT(&failure_tree->state))
2158 return;
2159
2160 spin_lock(&failure_tree->lock);
2161 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2162 while (state) {
2163 if (state->start > end)
2164 break;
2165
2166 ASSERT(state->end <= end);
2167
2168 next = next_state(state);
2169
Satoru Takeuchi6e1103a2014-12-25 18:21:41 +09002170 failrec = (struct io_failure_record *)(unsigned long)state->private;
Miao Xief6124962014-09-12 18:44:04 +08002171 free_extent_state(state);
2172 kfree(failrec);
2173
2174 state = next;
2175 }
2176 spin_unlock(&failure_tree->lock);
2177}
2178
Miao Xie2fe63032014-09-12 18:43:59 +08002179int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2180 struct io_failure_record **failrec_ret)
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002181{
Miao Xie2fe63032014-09-12 18:43:59 +08002182 struct io_failure_record *failrec;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002183 u64 private;
2184 struct extent_map *em;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002185 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2186 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2187 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002188 int ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002189 u64 logical;
2190
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002191 ret = get_state_private(failure_tree, start, &private);
2192 if (ret) {
2193 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2194 if (!failrec)
2195 return -ENOMEM;
Miao Xie2fe63032014-09-12 18:43:59 +08002196
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002197 failrec->start = start;
2198 failrec->len = end - start + 1;
2199 failrec->this_mirror = 0;
2200 failrec->bio_flags = 0;
2201 failrec->in_validation = 0;
2202
2203 read_lock(&em_tree->lock);
2204 em = lookup_extent_mapping(em_tree, start, failrec->len);
2205 if (!em) {
2206 read_unlock(&em_tree->lock);
2207 kfree(failrec);
2208 return -EIO;
2209 }
2210
Filipe David Borba Manana68ba9902013-11-25 03:22:07 +00002211 if (em->start > start || em->start + em->len <= start) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002212 free_extent_map(em);
2213 em = NULL;
2214 }
2215 read_unlock(&em_tree->lock);
Tsutomu Itoh7a2d6a62012-10-01 03:07:15 -06002216 if (!em) {
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002217 kfree(failrec);
2218 return -EIO;
2219 }
Miao Xie2fe63032014-09-12 18:43:59 +08002220
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002221 logical = start - em->start;
2222 logical = em->block_start + logical;
2223 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2224 logical = em->block_start;
2225 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2226 extent_set_compress_type(&failrec->bio_flags,
2227 em->compress_type);
2228 }
Miao Xie2fe63032014-09-12 18:43:59 +08002229
2230 pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
2231 logical, start, failrec->len);
2232
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002233 failrec->logical = logical;
2234 free_extent_map(em);
2235
2236 /* set the bits in the private failure tree */
2237 ret = set_extent_bits(failure_tree, start, end,
2238 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2239 if (ret >= 0)
2240 ret = set_state_private(failure_tree, start,
2241 (u64)(unsigned long)failrec);
2242 /* set the bits in the inode's tree */
2243 if (ret >= 0)
2244 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2245 GFP_NOFS);
2246 if (ret < 0) {
2247 kfree(failrec);
2248 return ret;
2249 }
2250 } else {
2251 failrec = (struct io_failure_record *)(unsigned long)private;
Miao Xie2fe63032014-09-12 18:43:59 +08002252 pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002253 failrec->logical, failrec->start, failrec->len,
2254 failrec->in_validation);
2255 /*
2256 * when data can be on disk more than twice, add to failrec here
2257 * (e.g. with a list for failed_mirror) to make
2258 * clean_io_failure() clean all those errors at once.
2259 */
2260 }
Miao Xie2fe63032014-09-12 18:43:59 +08002261
2262 *failrec_ret = failrec;
2263
2264 return 0;
2265}
2266
2267int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
2268 struct io_failure_record *failrec, int failed_mirror)
2269{
2270 int num_copies;
2271
Stefan Behrens5d964052012-11-05 14:59:07 +01002272 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2273 failrec->logical, failrec->len);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002274 if (num_copies == 1) {
2275 /*
2276 * we only have a single copy of the data, so don't bother with
2277 * all the retry and error correction code that follows. no
2278 * matter what the error is, it is very likely to persist.
2279 */
Miao Xie2fe63032014-09-12 18:43:59 +08002280 pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
Miao Xie09a7f7a2013-07-25 19:22:32 +08002281 num_copies, failrec->this_mirror, failed_mirror);
Miao Xie2fe63032014-09-12 18:43:59 +08002282 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002283 }
2284
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002285 /*
2286 * there are two premises:
2287 * a) deliver good data to the caller
2288 * b) correct the bad sectors on disk
2289 */
2290 if (failed_bio->bi_vcnt > 1) {
2291 /*
2292 * to fulfill b), we need to know the exact failing sectors, as
2293 * we don't want to rewrite any more than the failed ones. thus,
2294 * we need separate read requests for the failed bio
2295 *
2296 * if the following BUG_ON triggers, our validation request got
2297 * merged. we need separate requests for our algorithm to work.
2298 */
2299 BUG_ON(failrec->in_validation);
2300 failrec->in_validation = 1;
2301 failrec->this_mirror = failed_mirror;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002302 } else {
2303 /*
2304 * we're ready to fulfill a) and b) alongside. get a good copy
2305 * of the failed sector and if we succeed, we have setup
2306 * everything for repair_io_failure to do the rest for us.
2307 */
2308 if (failrec->in_validation) {
2309 BUG_ON(failrec->this_mirror != failed_mirror);
2310 failrec->in_validation = 0;
2311 failrec->this_mirror = 0;
2312 }
2313 failrec->failed_mirror = failed_mirror;
2314 failrec->this_mirror++;
2315 if (failrec->this_mirror == failed_mirror)
2316 failrec->this_mirror++;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002317 }
2318
Miao Xiefacc8a222013-07-25 19:22:34 +08002319 if (failrec->this_mirror > num_copies) {
Miao Xie2fe63032014-09-12 18:43:59 +08002320 pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002321 num_copies, failrec->this_mirror, failed_mirror);
Miao Xie2fe63032014-09-12 18:43:59 +08002322 return 0;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002323 }
2324
Miao Xie2fe63032014-09-12 18:43:59 +08002325 return 1;
2326}
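
/*
 * btrfs_check_repairable() returns 1 when a retry makes sense, in which
 * case failrec->this_mirror has been advanced to the next mirror to try,
 * and 0 when the error cannot be repaired (single copy, or all mirrors
 * already tried).  bio_readpage_error() below shows the intended calling
 * sequence together with btrfs_create_repair_bio().
 */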
2327
2328
2329struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2330 struct io_failure_record *failrec,
2331 struct page *page, int pg_offset, int icsum,
Miao Xie8b110e32014-09-12 18:44:03 +08002332 bio_end_io_t *endio_func, void *data)
Miao Xie2fe63032014-09-12 18:43:59 +08002333{
2334 struct bio *bio;
2335 struct btrfs_io_bio *btrfs_failed_bio;
2336 struct btrfs_io_bio *btrfs_bio;
2337
Chris Mason9be33952013-05-17 18:30:14 -04002338 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Miao Xie2fe63032014-09-12 18:43:59 +08002339 if (!bio)
2340 return NULL;
2341
2342 bio->bi_end_io = endio_func;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002343 bio->bi_iter.bi_sector = failrec->logical >> 9;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002344 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002345 bio->bi_iter.bi_size = 0;
Miao Xie8b110e32014-09-12 18:44:03 +08002346 bio->bi_private = data;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002347
Miao Xiefacc8a222013-07-25 19:22:34 +08002348 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2349 if (btrfs_failed_bio->csum) {
2350 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2351 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2352
2353 btrfs_bio = btrfs_io_bio(bio);
2354 btrfs_bio->csum = btrfs_bio->csum_inline;
Miao Xie2fe63032014-09-12 18:43:59 +08002355 icsum *= csum_size;
2356 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
Miao Xiefacc8a222013-07-25 19:22:34 +08002357 csum_size);
2358 }
2359
Miao Xie2fe63032014-09-12 18:43:59 +08002360 bio_add_page(bio, page, failrec->len, pg_offset);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002361
Miao Xie2fe63032014-09-12 18:43:59 +08002362 return bio;
2363}
2364
2365/*
2366 * this is a generic handler for readpage errors (default
2367 * readpage_io_failed_hook). if other copies exist, read those and write back
 2368 * good data to the failed position. does not try to remap the
2369 * failed extent elsewhere, hoping the device will be smart enough to do this as
2370 * needed
2371 */
2372
2373static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2374 struct page *page, u64 start, u64 end,
2375 int failed_mirror)
2376{
2377 struct io_failure_record *failrec;
2378 struct inode *inode = page->mapping->host;
2379 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2380 struct bio *bio;
2381 int read_mode;
2382 int ret;
2383
2384 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2385
2386 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2387 if (ret)
2388 return ret;
2389
2390 ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
2391 if (!ret) {
2392 free_io_failure(inode, failrec);
2393 return -EIO;
2394 }
2395
2396 if (failed_bio->bi_vcnt > 1)
2397 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2398 else
2399 read_mode = READ_SYNC;
2400
2401 phy_offset >>= inode->i_sb->s_blocksize_bits;
2402 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2403 start - page_offset(page),
Miao Xie8b110e32014-09-12 18:44:03 +08002404 (int)phy_offset, failed_bio->bi_end_io,
2405 NULL);
Miao Xie2fe63032014-09-12 18:43:59 +08002406 if (!bio) {
2407 free_io_failure(inode, failrec);
2408 return -EIO;
2409 }
2410
2411 pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
2412 read_mode, failrec->this_mirror, failrec->in_validation);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002413
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002414 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2415 failrec->this_mirror,
2416 failrec->bio_flags, 0);
Miao Xie6c387ab2014-09-12 18:43:57 +08002417 if (ret) {
Miao Xie454ff3d2014-09-12 18:43:58 +08002418 free_io_failure(inode, failrec);
Miao Xie6c387ab2014-09-12 18:43:57 +08002419 bio_put(bio);
2420 }
2421
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002422 return ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002423}
2424
Chris Masond1310b22008-01-24 16:13:08 -05002425/* lots and lots of room for performance fixes in the end_bio funcs */
2426
David Sterbab5227c02015-12-03 13:08:59 +01002427void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
Jeff Mahoney87826df2012-02-15 16:23:57 +01002428{
2429 int uptodate = (err == 0);
2430 struct extent_io_tree *tree;
Eric Sandeen3e2426b2014-06-12 00:39:58 -05002431 int ret = 0;
Jeff Mahoney87826df2012-02-15 16:23:57 +01002432
2433 tree = &BTRFS_I(page->mapping->host)->io_tree;
2434
2435 if (tree->ops && tree->ops->writepage_end_io_hook) {
2436 ret = tree->ops->writepage_end_io_hook(page, start,
2437 end, NULL, uptodate);
2438 if (ret)
2439 uptodate = 0;
2440 }
2441
Jeff Mahoney87826df2012-02-15 16:23:57 +01002442 if (!uptodate) {
Jeff Mahoney87826df2012-02-15 16:23:57 +01002443 ClearPageUptodate(page);
2444 SetPageError(page);
Liu Bo5dca6ee2014-05-12 12:47:36 +08002445 ret = ret < 0 ? ret : -EIO;
2446 mapping_set_error(page->mapping, ret);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002447 }
Jeff Mahoney87826df2012-02-15 16:23:57 +01002448}
2449
Chris Masond1310b22008-01-24 16:13:08 -05002450/*
2451 * after a writepage IO is done, we need to:
2452 * clear the uptodate bits on error
2453 * clear the writeback bits in the extent tree for this IO
2454 * end_page_writeback if the page has no more pending IO
2455 *
2456 * Scheduling is not allowed, so the extent state tree is expected
2457 * to have one and only one object corresponding to this IO.
2458 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002459static void end_bio_extent_writepage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002460{
Kent Overstreet2c30c712013-11-07 12:20:26 -08002461 struct bio_vec *bvec;
Chris Masond1310b22008-01-24 16:13:08 -05002462 u64 start;
2463 u64 end;
Kent Overstreet2c30c712013-11-07 12:20:26 -08002464 int i;
Chris Masond1310b22008-01-24 16:13:08 -05002465
Kent Overstreet2c30c712013-11-07 12:20:26 -08002466 bio_for_each_segment_all(bvec, bio, i) {
Chris Masond1310b22008-01-24 16:13:08 -05002467 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04002468
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002469 /* We always issue full-page reads, but if some block
2470 * in a page fails to read, blk_update_request() will
2471 * advance bv_offset and adjust bv_len to compensate.
2472 * Print a warning for nonzero offsets, and an error
2473 * if they don't add up to a full page. */
Frank Holtonefe120a2013-12-20 11:37:06 -05002474 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2475 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2476 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2477 "partial page write in btrfs with offset %u and length %u",
2478 bvec->bv_offset, bvec->bv_len);
2479 else
2480 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2481 "incomplete page write in btrfs with offset %u and "
2482 "length %u",
2483 bvec->bv_offset, bvec->bv_len);
2484 }
Chris Masond1310b22008-01-24 16:13:08 -05002485
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002486 start = page_offset(page);
2487 end = start + bvec->bv_offset + bvec->bv_len - 1;
Chris Masond1310b22008-01-24 16:13:08 -05002488
David Sterbab5227c02015-12-03 13:08:59 +01002489 end_extent_writepage(page, bio->bi_error, start, end);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002490 end_page_writeback(page);
Kent Overstreet2c30c712013-11-07 12:20:26 -08002491 }
Chris Mason2b1f55b2008-09-24 11:48:04 -04002492
Chris Masond1310b22008-01-24 16:13:08 -05002493 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002494}
2495
Miao Xie883d0de2013-07-25 19:22:35 +08002496static void
2497endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2498 int uptodate)
2499{
2500 struct extent_state *cached = NULL;
2501 u64 end = start + len - 1;
2502
2503 if (uptodate && tree->track_uptodate)
2504 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2505 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2506}
2507
Chris Masond1310b22008-01-24 16:13:08 -05002508/*
2509 * after a readpage IO is done, we need to:
2510 * clear the uptodate bits on error
2511 * set the uptodate bits if things worked
2512 * set the page up to date if all extents in the tree are uptodate
2513 * clear the lock bit in the extent tree
2514 * unlock the page if there are no other extents locked for it
2515 *
2516 * Scheduling is not allowed, so the extent state tree is expected
2517 * to have one and only one object corresponding to this IO.
2518 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002519static void end_bio_extent_readpage(struct bio *bio)
Chris Masond1310b22008-01-24 16:13:08 -05002520{
Kent Overstreet2c30c712013-11-07 12:20:26 -08002521 struct bio_vec *bvec;
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002522 int uptodate = !bio->bi_error;
Miao Xiefacc8a222013-07-25 19:22:34 +08002523 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
David Woodhouse902b22f2008-08-20 08:51:49 -04002524 struct extent_io_tree *tree;
Miao Xiefacc8a222013-07-25 19:22:34 +08002525 u64 offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002526 u64 start;
2527 u64 end;
Miao Xiefacc8a222013-07-25 19:22:34 +08002528 u64 len;
Miao Xie883d0de2013-07-25 19:22:35 +08002529 u64 extent_start = 0;
2530 u64 extent_len = 0;
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002531 int mirror;
Chris Masond1310b22008-01-24 16:13:08 -05002532 int ret;
Kent Overstreet2c30c712013-11-07 12:20:26 -08002533 int i;
Chris Masond1310b22008-01-24 16:13:08 -05002534
Kent Overstreet2c30c712013-11-07 12:20:26 -08002535 bio_for_each_segment_all(bvec, bio, i) {
Chris Masond1310b22008-01-24 16:13:08 -05002536 struct page *page = bvec->bv_page;
Josef Bacika71754f2013-06-17 17:14:39 -04002537 struct inode *inode = page->mapping->host;
Arne Jansen507903b2011-04-06 10:02:20 +00002538
Kent Overstreetbe3940c2012-09-11 14:23:05 -06002539 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002540 "mirror=%u\n", (u64)bio->bi_iter.bi_sector,
2541 bio->bi_error, io_bio->mirror_num);
Josef Bacika71754f2013-06-17 17:14:39 -04002542 tree = &BTRFS_I(inode)->io_tree;
David Woodhouse902b22f2008-08-20 08:51:49 -04002543
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002544 /* We always issue full-page reads, but if some block
2545 * in a page fails to read, blk_update_request() will
2546 * advance bv_offset and adjust bv_len to compensate.
2547 * Print a warning for nonzero offsets, and an error
2548 * if they don't add up to a full page. */
Frank Holtonefe120a2013-12-20 11:37:06 -05002549 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2550 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2551 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2552 "partial page read in btrfs with offset %u and length %u",
2553 bvec->bv_offset, bvec->bv_len);
2554 else
2555 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2556 "incomplete page read in btrfs with offset %u and "
2557 "length %u",
2558 bvec->bv_offset, bvec->bv_len);
2559 }
Chris Masond1310b22008-01-24 16:13:08 -05002560
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002561 start = page_offset(page);
2562 end = start + bvec->bv_offset + bvec->bv_len - 1;
Miao Xiefacc8a222013-07-25 19:22:34 +08002563 len = bvec->bv_len;
Chris Masond1310b22008-01-24 16:13:08 -05002564
Chris Mason9be33952013-05-17 18:30:14 -04002565 mirror = io_bio->mirror_num;
Miao Xief2a09da2013-07-25 19:22:33 +08002566 if (likely(uptodate && tree->ops &&
2567 tree->ops->readpage_end_io_hook)) {
Miao Xiefacc8a222013-07-25 19:22:34 +08002568 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2569 page, start, end,
2570 mirror);
Stefan Behrens5ee08442012-08-27 08:30:03 -06002571 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05002572 uptodate = 0;
Stefan Behrens5ee08442012-08-27 08:30:03 -06002573 else
Miao Xie1203b682014-09-12 18:44:01 +08002574 clean_io_failure(inode, start, page, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002575 }
Josef Bacikea466792012-03-26 21:57:36 -04002576
Miao Xief2a09da2013-07-25 19:22:33 +08002577 if (likely(uptodate))
2578 goto readpage_ok;
2579
2580 if (tree->ops && tree->ops->readpage_io_failed_hook) {
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002581 ret = tree->ops->readpage_io_failed_hook(page, mirror);
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002582 if (!ret && !bio->bi_error)
Josef Bacikea466792012-03-26 21:57:36 -04002583 uptodate = 1;
Miao Xief2a09da2013-07-25 19:22:33 +08002584 } else {
Jan Schmidtf4a8e652011-12-01 09:30:36 -05002585 /*
2586 * The generic bio_readpage_error handles errors the
2587 * following way: If possible, new read requests are
2588 * created and submitted and will end up in
2589 * end_bio_extent_readpage as well (if we're lucky, not
2590 * in the !uptodate case). In that case it returns 0 and
2591 * we just go on with the next page in our bio. If it
2592 * can't handle the error it will return -EIO and we
2593 * remain responsible for that page.
2594 */
Miao Xiefacc8a222013-07-25 19:22:34 +08002595 ret = bio_readpage_error(bio, offset, page, start, end,
2596 mirror);
Chris Mason7e383262008-04-09 16:28:12 -04002597 if (ret == 0) {
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002598 uptodate = !bio->bi_error;
Liu Bo38c1c2e2014-08-19 23:33:13 +08002599 offset += len;
Chris Mason7e383262008-04-09 16:28:12 -04002600 continue;
2601 }
2602 }
Miao Xief2a09da2013-07-25 19:22:33 +08002603readpage_ok:
Miao Xie883d0de2013-07-25 19:22:35 +08002604 if (likely(uptodate)) {
Josef Bacika71754f2013-06-17 17:14:39 -04002605 loff_t i_size = i_size_read(inode);
2606 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
Liu Boa583c022014-08-19 23:32:22 +08002607 unsigned off;
Josef Bacika71754f2013-06-17 17:14:39 -04002608
2609 /* Zero out the end if this page straddles i_size */
Liu Boa583c022014-08-19 23:32:22 +08002610 off = i_size & (PAGE_CACHE_SIZE-1);
2611 if (page->index == end_index && off)
2612 zero_user_segment(page, off, PAGE_CACHE_SIZE);
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002613 SetPageUptodate(page);
Chris Mason70dec802008-01-29 09:59:12 -05002614 } else {
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002615 ClearPageUptodate(page);
2616 SetPageError(page);
Chris Mason70dec802008-01-29 09:59:12 -05002617 }
Alexandre Oliva17a5adc2013-05-15 11:38:55 -04002618 unlock_page(page);
Miao Xiefacc8a222013-07-25 19:22:34 +08002619 offset += len;
Miao Xie883d0de2013-07-25 19:22:35 +08002620
2621 if (unlikely(!uptodate)) {
2622 if (extent_len) {
2623 endio_readpage_release_extent(tree,
2624 extent_start,
2625 extent_len, 1);
2626 extent_start = 0;
2627 extent_len = 0;
2628 }
2629 endio_readpage_release_extent(tree, start,
2630 end - start + 1, 0);
2631 } else if (!extent_len) {
2632 extent_start = start;
2633 extent_len = end + 1 - start;
2634 } else if (extent_start + extent_len == start) {
2635 extent_len += end + 1 - start;
2636 } else {
2637 endio_readpage_release_extent(tree, extent_start,
2638 extent_len, uptodate);
2639 extent_start = start;
2640 extent_len = end + 1 - start;
2641 }
Kent Overstreet2c30c712013-11-07 12:20:26 -08002642 }
Chris Masond1310b22008-01-24 16:13:08 -05002643
Miao Xie883d0de2013-07-25 19:22:35 +08002644 if (extent_len)
2645 endio_readpage_release_extent(tree, extent_start, extent_len,
2646 uptodate);
Miao Xiefacc8a222013-07-25 19:22:34 +08002647 if (io_bio->end_io)
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002648 io_bio->end_io(io_bio, bio->bi_error);
Chris Masond1310b22008-01-24 16:13:08 -05002649 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002650}
2651
Chris Mason9be33952013-05-17 18:30:14 -04002652/*
2653 * this allocates from the btrfs_bioset. We're returning a bio right now
2654 * but you can call btrfs_io_bio for the appropriate container_of magic
2655 */
Miao Xie88f794e2010-11-22 03:02:55 +00002656struct bio *
2657btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2658 gfp_t gfp_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002659{
Miao Xiefacc8a222013-07-25 19:22:34 +08002660 struct btrfs_io_bio *btrfs_bio;
Chris Masond1310b22008-01-24 16:13:08 -05002661 struct bio *bio;
2662
Chris Mason9be33952013-05-17 18:30:14 -04002663 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
Chris Masond1310b22008-01-24 16:13:08 -05002664
2665 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
Chris Mason9be33952013-05-17 18:30:14 -04002666 while (!bio && (nr_vecs /= 2)) {
2667 bio = bio_alloc_bioset(gfp_flags,
2668 nr_vecs, btrfs_bioset);
2669 }
Chris Masond1310b22008-01-24 16:13:08 -05002670 }
2671
2672 if (bio) {
2673 bio->bi_bdev = bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002674 bio->bi_iter.bi_sector = first_sector;
Miao Xiefacc8a222013-07-25 19:22:34 +08002675 btrfs_bio = btrfs_io_bio(bio);
2676 btrfs_bio->csum = NULL;
2677 btrfs_bio->csum_allocated = NULL;
2678 btrfs_bio->end_io = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002679 }
2680 return bio;
2681}
2682
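/*
 * clone a bio from the btrfs_bioset and clear the btrfs specific fields
 * (csum, csum_allocated, end_io) so the caller can set them up for the new
 * submission.  The blkcg association is carried over when CONFIG_BLK_CGROUP
 * is enabled.
 */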
Chris Mason9be33952013-05-17 18:30:14 -04002683struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2684{
Miao Xie23ea8e52014-09-12 18:43:54 +08002685 struct btrfs_io_bio *btrfs_bio;
2686 struct bio *new;
Chris Mason9be33952013-05-17 18:30:14 -04002687
Miao Xie23ea8e52014-09-12 18:43:54 +08002688 new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2689 if (new) {
2690 btrfs_bio = btrfs_io_bio(new);
2691 btrfs_bio->csum = NULL;
2692 btrfs_bio->csum_allocated = NULL;
2693 btrfs_bio->end_io = NULL;
Chris Mason3a9508b2015-08-21 10:05:39 -07002694
2695#ifdef CONFIG_BLK_CGROUP
Chris Masonda2f0f72015-07-02 13:57:22 -07002696 /* FIXME, put this into bio_clone_bioset */
2697 if (bio->bi_css)
2698 bio_associate_blkcg(new, bio->bi_css);
Chris Mason3a9508b2015-08-21 10:05:39 -07002699#endif
Miao Xie23ea8e52014-09-12 18:43:54 +08002700 }
2701 return new;
2702}
Chris Mason9be33952013-05-17 18:30:14 -04002703
2704/* this also allocates from the btrfs_bioset */
2705struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2706{
Miao Xiefacc8a222013-07-25 19:22:34 +08002707 struct btrfs_io_bio *btrfs_bio;
2708 struct bio *bio;
2709
2710 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2711 if (bio) {
2712 btrfs_bio = btrfs_io_bio(bio);
2713 btrfs_bio->csum = NULL;
2714 btrfs_bio->csum_allocated = NULL;
2715 btrfs_bio->end_io = NULL;
2716 }
2717 return bio;
Chris Mason9be33952013-05-17 18:30:14 -04002718}
2719
2720
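/*
 * hand a fully built bio to the filesystem: route it through the tree's
 * submit_bio_hook when one is registered, otherwise send it straight down
 * via btrfsic_submit_bio.
 */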
Jeff Mahoney355808c2011-10-03 23:23:14 -04002721static int __must_check submit_one_bio(int rw, struct bio *bio,
2722 int mirror_num, unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002723{
Chris Masond1310b22008-01-24 16:13:08 -05002724 int ret = 0;
Chris Mason70dec802008-01-29 09:59:12 -05002725 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2726 struct page *page = bvec->bv_page;
2727 struct extent_io_tree *tree = bio->bi_private;
Chris Mason70dec802008-01-29 09:59:12 -05002728 u64 start;
Chris Mason70dec802008-01-29 09:59:12 -05002729
Miao Xie4eee4fa2012-12-21 09:17:45 +00002730 start = page_offset(page) + bvec->bv_offset;
Chris Mason70dec802008-01-29 09:59:12 -05002731
David Woodhouse902b22f2008-08-20 08:51:49 -04002732 bio->bi_private = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002733
2734 bio_get(bio);
2735
Chris Mason065631f2008-02-20 12:07:25 -05002736 if (tree->ops && tree->ops->submit_bio_hook)
liubo6b82ce82011-01-26 06:21:39 +00002737 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
Chris Masoneaf25d92010-05-25 09:48:28 -04002738 mirror_num, bio_flags, start);
Chris Mason0b86a832008-03-24 15:01:56 -04002739 else
Stefan Behrens21adbd52011-11-09 13:44:05 +01002740 btrfsic_submit_bio(rw, bio);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002741
Chris Masond1310b22008-01-24 16:13:08 -05002742 bio_put(bio);
2743 return ret;
2744}
2745
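/*
 * ask the tree's merge_bio_hook (if any) whether this page may be added to
 * @bio; a nonzero return tells the caller to submit the current bio and
 * start a new one.
 */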
David Woodhouse64a16702009-07-15 23:29:37 +01002746static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
Jeff Mahoney3444a972011-10-03 23:23:13 -04002747 unsigned long offset, size_t size, struct bio *bio,
2748 unsigned long bio_flags)
2749{
2750 int ret = 0;
2751 if (tree->ops && tree->ops->merge_bio_hook)
David Woodhouse64a16702009-07-15 23:29:37 +01002752 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
Jeff Mahoney3444a972011-10-03 23:23:13 -04002753 bio_flags);
2754 BUG_ON(ret < 0);
2755 return ret;
2756
2757}
2758
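/*
 * add @page to the bio cached in @bio_ret when it is contiguous and has
 * compatible flags.  Otherwise (or when @force_bio_submit is set, or the
 * merge hook refuses) the cached bio is submitted and a new one is started.
 * The new bio is stored back in @bio_ret for the caller to submit later,
 * or submitted immediately when no @bio_ret is supplied.
 */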
Chris Masond1310b22008-01-24 16:13:08 -05002759static int submit_extent_page(int rw, struct extent_io_tree *tree,
Chris Masonda2f0f72015-07-02 13:57:22 -07002760 struct writeback_control *wbc,
Chris Masond1310b22008-01-24 16:13:08 -05002761 struct page *page, sector_t sector,
2762 size_t size, unsigned long offset,
2763 struct block_device *bdev,
2764 struct bio **bio_ret,
2765 unsigned long max_pages,
Chris Masonf1885912008-04-09 16:28:12 -04002766 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04002767 int mirror_num,
2768 unsigned long prev_bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01002769 unsigned long bio_flags,
2770 bool force_bio_submit)
Chris Masond1310b22008-01-24 16:13:08 -05002771{
2772 int ret = 0;
2773 struct bio *bio;
Chris Masonc8b97812008-10-29 14:49:59 -04002774 int contig = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04002775 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
Chris Mason5b050f02008-11-11 09:34:41 -05002776 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
Chris Masond1310b22008-01-24 16:13:08 -05002777
2778 if (bio_ret && *bio_ret) {
2779 bio = *bio_ret;
Chris Masonc8b97812008-10-29 14:49:59 -04002780 if (old_compressed)
Kent Overstreet4f024f32013-10-11 15:44:27 -07002781 contig = bio->bi_iter.bi_sector == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04002782 else
Kent Overstreetf73a1c72012-09-25 15:05:12 -07002783 contig = bio_end_sector(bio) == sector;
Chris Masonc8b97812008-10-29 14:49:59 -04002784
2785 if (prev_bio_flags != bio_flags || !contig ||
Filipe Manana005efed2015-09-14 09:09:31 +01002786 force_bio_submit ||
David Woodhouse64a16702009-07-15 23:29:37 +01002787 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
Chris Masonc8b97812008-10-29 14:49:59 -04002788 bio_add_page(bio, page, page_size, offset) < page_size) {
2789 ret = submit_one_bio(rw, bio, mirror_num,
2790 prev_bio_flags);
Naohiro Aota289454a2015-01-06 01:01:03 +09002791 if (ret < 0) {
2792 *bio_ret = NULL;
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002793 return ret;
Naohiro Aota289454a2015-01-06 01:01:03 +09002794 }
Chris Masond1310b22008-01-24 16:13:08 -05002795 bio = NULL;
2796 } else {
Chris Masonda2f0f72015-07-02 13:57:22 -07002797 if (wbc)
2798 wbc_account_io(wbc, page, page_size);
Chris Masond1310b22008-01-24 16:13:08 -05002799 return 0;
2800 }
2801 }
Chris Masonc8b97812008-10-29 14:49:59 -04002802
Kent Overstreetb54ffb72015-05-19 14:31:01 +02002803 bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES,
2804 GFP_NOFS | __GFP_HIGH);
Tsutomu Itoh5df67082011-02-01 09:17:35 +00002805 if (!bio)
2806 return -ENOMEM;
Chris Mason70dec802008-01-29 09:59:12 -05002807
Chris Masonc8b97812008-10-29 14:49:59 -04002808 bio_add_page(bio, page, page_size, offset);
Chris Masond1310b22008-01-24 16:13:08 -05002809 bio->bi_end_io = end_io_func;
2810 bio->bi_private = tree;
Chris Masonda2f0f72015-07-02 13:57:22 -07002811 if (wbc) {
2812 wbc_init_bio(wbc, bio);
2813 wbc_account_io(wbc, page, page_size);
2814 }
Chris Mason70dec802008-01-29 09:59:12 -05002815
Chris Masond3977122009-01-05 21:25:51 -05002816 if (bio_ret)
Chris Masond1310b22008-01-24 16:13:08 -05002817 *bio_ret = bio;
Chris Masond3977122009-01-05 21:25:51 -05002818 else
Chris Masonc8b97812008-10-29 14:49:59 -04002819 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002820
2821 return ret;
2822}
2823
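/*
 * tie an extent buffer to a page via page->private, taking an extra page
 * reference the first time; later calls only sanity check the existing link.
 */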
Eric Sandeen48a3b632013-04-25 20:41:01 +00002824static void attach_extent_buffer_page(struct extent_buffer *eb,
2825 struct page *page)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05002826{
2827 if (!PagePrivate(page)) {
2828 SetPagePrivate(page);
2829 page_cache_get(page);
2830 set_page_private(page, (unsigned long)eb);
2831 } else {
2832 WARN_ON(page->private != (unsigned long)eb);
2833 }
2834}
2835
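/*
 * mark a data page as managed by the extent io code by setting its
 * page->private to EXTENT_PAGE_PRIVATE (and holding a page reference).
 */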
Chris Masond1310b22008-01-24 16:13:08 -05002836void set_page_extent_mapped(struct page *page)
2837{
2838 if (!PagePrivate(page)) {
2839 SetPagePrivate(page);
Chris Masond1310b22008-01-24 16:13:08 -05002840 page_cache_get(page);
Chris Mason6af118ce2008-07-22 11:18:07 -04002841 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05002842 }
2843}
2844
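/*
 * return an extent map covering @start: reuse *em_cached when it still
 * overlaps the range, otherwise drop the stale map, look up a fresh one
 * via get_extent and remember it in *em_cached for the next page.
 */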
Miao Xie125bac012013-07-25 19:22:37 +08002845static struct extent_map *
2846__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2847 u64 start, u64 len, get_extent_t *get_extent,
2848 struct extent_map **em_cached)
2849{
2850 struct extent_map *em;
2851
2852 if (em_cached && *em_cached) {
2853 em = *em_cached;
Filipe Mananacbc0e922014-02-25 14:15:12 +00002854 if (extent_map_in_tree(em) && start >= em->start &&
Miao Xie125bac012013-07-25 19:22:37 +08002855 start < extent_map_end(em)) {
2856 atomic_inc(&em->refs);
2857 return em;
2858 }
2859
2860 free_extent_map(em);
2861 *em_cached = NULL;
2862 }
2863
2864 em = get_extent(inode, page, pg_offset, start, len, 0);
2865 if (em_cached && !IS_ERR_OR_NULL(em)) {
2866 BUG_ON(*em_cached);
2867 atomic_inc(&em->refs);
2868 *em_cached = em;
2869 }
2870 return em;
2871}
Chris Masond1310b22008-01-24 16:13:08 -05002872/*
2873 * basic readpage implementation. Locked extent state structs are inserted
2874 * into the tree and are removed when the IO is done (by the end_io
2875 * handlers)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002876 * XXX JDM: This needs looking at to ensure proper page locking
Chris Masond1310b22008-01-24 16:13:08 -05002877 */
Miao Xie99740902013-07-25 19:22:36 +08002878static int __do_readpage(struct extent_io_tree *tree,
2879 struct page *page,
2880 get_extent_t *get_extent,
Miao Xie125bac012013-07-25 19:22:37 +08002881 struct extent_map **em_cached,
Miao Xie99740902013-07-25 19:22:36 +08002882 struct bio **bio, int mirror_num,
Filipe Manana005efed2015-09-14 09:09:31 +01002883 unsigned long *bio_flags, int rw,
2884 u64 *prev_em_start)
Chris Masond1310b22008-01-24 16:13:08 -05002885{
2886 struct inode *inode = page->mapping->host;
Miao Xie4eee4fa2012-12-21 09:17:45 +00002887 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05002888 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2889 u64 end;
2890 u64 cur = start;
2891 u64 extent_offset;
2892 u64 last_byte = i_size_read(inode);
2893 u64 block_start;
2894 u64 cur_end;
2895 sector_t sector;
2896 struct extent_map *em;
2897 struct block_device *bdev;
2898 int ret;
2899 int nr = 0;
Mark Fasheh4b384312013-08-06 11:42:50 -07002900 int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
David Sterba306e16c2011-04-19 14:29:38 +02002901 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002902 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04002903 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05002904 size_t blocksize = inode->i_sb->s_blocksize;
Mark Fasheh4b384312013-08-06 11:42:50 -07002905 unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
Chris Masond1310b22008-01-24 16:13:08 -05002906
2907 set_page_extent_mapped(page);
2908
Miao Xie99740902013-07-25 19:22:36 +08002909 end = page_end;
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002910 if (!PageUptodate(page)) {
2911 if (cleancache_get_page(page) == 0) {
2912 BUG_ON(blocksize != PAGE_SIZE);
Miao Xie99740902013-07-25 19:22:36 +08002913 unlock_extent(tree, start, end);
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002914 goto out;
2915 }
2916 }
2917
Chris Masonc8b97812008-10-29 14:49:59 -04002918 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2919 char *userpage;
2920 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2921
2922 if (zero_offset) {
2923 iosize = PAGE_CACHE_SIZE - zero_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002924 userpage = kmap_atomic(page);
Chris Masonc8b97812008-10-29 14:49:59 -04002925 memset(userpage + zero_offset, 0, iosize);
2926 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002927 kunmap_atomic(userpage);
Chris Masonc8b97812008-10-29 14:49:59 -04002928 }
2929 }
Chris Masond1310b22008-01-24 16:13:08 -05002930 while (cur <= end) {
Josef Bacikc8f2f242013-02-11 11:33:00 -05002931 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
Filipe Manana005efed2015-09-14 09:09:31 +01002932 bool force_bio_submit = false;
Josef Bacikc8f2f242013-02-11 11:33:00 -05002933
Chris Masond1310b22008-01-24 16:13:08 -05002934 if (cur >= last_byte) {
2935 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002936 struct extent_state *cached = NULL;
2937
David Sterba306e16c2011-04-19 14:29:38 +02002938 iosize = PAGE_CACHE_SIZE - pg_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002939 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02002940 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002941 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002942 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05002943 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002944 &cached, GFP_NOFS);
Mark Fasheh4b384312013-08-06 11:42:50 -07002945 if (!parent_locked)
2946 unlock_extent_cached(tree, cur,
2947 cur + iosize - 1,
2948 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002949 break;
2950 }
Miao Xie125bac012013-07-25 19:22:37 +08002951 em = __get_extent_map(inode, page, pg_offset, cur,
2952 end - cur + 1, get_extent, em_cached);
David Sterbac7040052011-04-19 18:00:01 +02002953 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05002954 SetPageError(page);
Mark Fasheh4b384312013-08-06 11:42:50 -07002955 if (!parent_locked)
2956 unlock_extent(tree, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05002957 break;
2958 }
Chris Masond1310b22008-01-24 16:13:08 -05002959 extent_offset = cur - em->start;
2960 BUG_ON(extent_map_end(em) <= cur);
2961 BUG_ON(end < cur);
2962
Li Zefan261507a02010-12-17 14:21:50 +08002963 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Mark Fasheh4b384312013-08-06 11:42:50 -07002964 this_bio_flag |= EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08002965 extent_set_compress_type(&this_bio_flag,
2966 em->compress_type);
2967 }
Chris Masonc8b97812008-10-29 14:49:59 -04002968
Chris Masond1310b22008-01-24 16:13:08 -05002969 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2970 cur_end = min(extent_map_end(em) - 1, end);
Qu Wenruofda28322013-02-26 08:10:22 +00002971 iosize = ALIGN(iosize, blocksize);
Chris Masonc8b97812008-10-29 14:49:59 -04002972 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2973 disk_io_size = em->block_len;
2974 sector = em->block_start >> 9;
2975 } else {
2976 sector = (em->block_start + extent_offset) >> 9;
2977 disk_io_size = iosize;
2978 }
Chris Masond1310b22008-01-24 16:13:08 -05002979 bdev = em->bdev;
2980 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04002981 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2982 block_start = EXTENT_MAP_HOLE;
Filipe Manana005efed2015-09-14 09:09:31 +01002983
2984 /*
2985 * If we have a file range that points to a compressed extent
2986 * and it's followed by a consecutive file range that points to
2987 * the same compressed extent (possibly with a different
2988 * offset and/or length, so it either points to the whole extent
2989 * or only part of it), we must make sure we do not submit a
2990 * single bio to populate the pages for the 2 ranges because
2991 * this makes the compressed extent read zero out the pages
2992 * belonging to the 2nd range. Imagine the following scenario:
2993 *
2994 * File layout
2995 * [0 - 8K] [8K - 24K]
2996 * | |
2997 * | |
2998 * points to extent X, points to extent X,
2999 * offset 4K, length of 8K offset 0, length 16K
3000 *
3001 * [extent X, compressed length = 4K uncompressed length = 16K]
3002 *
3003 * If the bio to read the compressed extent covers both ranges,
3004 * it will decompress extent X into the pages belonging to the
3005 * first range and then it will stop, zeroing out the remaining
3006 * pages that belong to the other range that points to extent X.
3007 * So here we make sure we submit 2 bios, one for the first
3008 * range and another one for the second range. Both will target
3009 * the same physical extent from disk, but we can't currently
3010 * make the compressed bio endio callback populate the pages
3011 * for both ranges because each compressed bio is tightly
3012 * coupled with a single extent map, and each range can have
3013 * an extent map with a different offset value relative to the
3014 * uncompressed data of our extent and different lengths. This
3015 * is a corner case, so we accept the non-optimal behavior
3016 * (submitting 2 bios for the same extent) in favor of correctness.
3017 */
3018 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3019 prev_em_start && *prev_em_start != (u64)-1 &&
3020 *prev_em_start != em->orig_start)
3021 force_bio_submit = true;
3022
3023 if (prev_em_start)
3024 *prev_em_start = em->orig_start;
3025
Chris Masond1310b22008-01-24 16:13:08 -05003026 free_extent_map(em);
3027 em = NULL;
3028
3029 /* we've found a hole, just zero and go on */
3030 if (block_start == EXTENT_MAP_HOLE) {
3031 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00003032 struct extent_state *cached = NULL;
3033
Cong Wang7ac687d2011-11-25 23:14:28 +08003034 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02003035 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05003036 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08003037 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05003038
3039 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00003040 &cached, GFP_NOFS);
Filipe Manana5e6ecb32015-10-13 16:36:09 +01003041 if (parent_locked)
3042 free_extent_state(cached);
3043 else
3044 unlock_extent_cached(tree, cur,
3045 cur + iosize - 1,
3046 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05003047 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003048 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003049 continue;
3050 }
3051 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04003052 if (test_range_bit(tree, cur, cur_end,
3053 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04003054 check_page_uptodate(tree, page);
Mark Fasheh4b384312013-08-06 11:42:50 -07003055 if (!parent_locked)
3056 unlock_extent(tree, cur, cur + iosize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05003057 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003058 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003059 continue;
3060 }
Chris Mason70dec802008-01-29 09:59:12 -05003061 /* we have an inline extent but it didn't get marked up
3062 * to date. Error out
3063 */
3064 if (block_start == EXTENT_MAP_INLINE) {
3065 SetPageError(page);
Mark Fasheh4b384312013-08-06 11:42:50 -07003066 if (!parent_locked)
3067 unlock_extent(tree, cur, cur + iosize - 1);
Chris Mason70dec802008-01-29 09:59:12 -05003068 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003069 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05003070 continue;
3071 }
Chris Masond1310b22008-01-24 16:13:08 -05003072
Josef Bacikc8f2f242013-02-11 11:33:00 -05003073 pnr -= page->index;
Chris Masonda2f0f72015-07-02 13:57:22 -07003074 ret = submit_extent_page(rw, tree, NULL, page,
David Sterba306e16c2011-04-19 14:29:38 +02003075 sector, disk_io_size, pg_offset,
Chris Mason89642222008-07-24 09:41:53 -04003076 bdev, bio, pnr,
Chris Masonc8b97812008-10-29 14:49:59 -04003077 end_bio_extent_readpage, mirror_num,
3078 *bio_flags,
Filipe Manana005efed2015-09-14 09:09:31 +01003079 this_bio_flag,
3080 force_bio_submit);
Josef Bacikc8f2f242013-02-11 11:33:00 -05003081 if (!ret) {
3082 nr++;
3083 *bio_flags = this_bio_flag;
3084 } else {
Chris Masond1310b22008-01-24 16:13:08 -05003085 SetPageError(page);
Mark Fasheh4b384312013-08-06 11:42:50 -07003086 if (!parent_locked)
3087 unlock_extent(tree, cur, cur + iosize - 1);
Josef Bacikedd33c92012-10-05 16:40:32 -04003088 }
Chris Masond1310b22008-01-24 16:13:08 -05003089 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02003090 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003091 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06003092out:
Chris Masond1310b22008-01-24 16:13:08 -05003093 if (!nr) {
3094 if (!PageError(page))
3095 SetPageUptodate(page);
3096 unlock_page(page);
3097 }
3098 return 0;
3099}
3100
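/*
 * lock the extent range backing a run of contiguous pages, waiting for any
 * ordered extents in it to complete, then read each page through
 * __do_readpage so the whole run can share one bio and one cached extent map.
 */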
Miao Xie99740902013-07-25 19:22:36 +08003101static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3102 struct page *pages[], int nr_pages,
3103 u64 start, u64 end,
3104 get_extent_t *get_extent,
Miao Xie125bac012013-07-25 19:22:37 +08003105 struct extent_map **em_cached,
Miao Xie99740902013-07-25 19:22:36 +08003106 struct bio **bio, int mirror_num,
Filipe Manana808f80b2015-09-28 09:56:26 +01003107 unsigned long *bio_flags, int rw,
3108 u64 *prev_em_start)
Miao Xie99740902013-07-25 19:22:36 +08003109{
3110 struct inode *inode;
3111 struct btrfs_ordered_extent *ordered;
3112 int index;
3113
3114 inode = pages[0]->mapping->host;
3115 while (1) {
3116 lock_extent(tree, start, end);
3117 ordered = btrfs_lookup_ordered_range(inode, start,
3118 end - start + 1);
3119 if (!ordered)
3120 break;
3121 unlock_extent(tree, start, end);
3122 btrfs_start_ordered_extent(inode, ordered, 1);
3123 btrfs_put_ordered_extent(ordered);
3124 }
3125
3126 for (index = 0; index < nr_pages; index++) {
Miao Xie125bac012013-07-25 19:22:37 +08003127 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
Filipe Manana808f80b2015-09-28 09:56:26 +01003128 mirror_num, bio_flags, rw, prev_em_start);
Miao Xie99740902013-07-25 19:22:36 +08003129 page_cache_release(pages[index]);
3130 }
3131}
3132
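/*
 * split the readahead page array into runs of pages that are contiguous in
 * the file and feed each run to __do_contiguous_readpages.
 */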
3133static void __extent_readpages(struct extent_io_tree *tree,
3134 struct page *pages[],
3135 int nr_pages, get_extent_t *get_extent,
Miao Xie125bac012013-07-25 19:22:37 +08003136 struct extent_map **em_cached,
Miao Xie99740902013-07-25 19:22:36 +08003137 struct bio **bio, int mirror_num,
Filipe Manana808f80b2015-09-28 09:56:26 +01003138 unsigned long *bio_flags, int rw,
3139 u64 *prev_em_start)
Miao Xie99740902013-07-25 19:22:36 +08003140{
Stefan Behrens35a36212013-08-14 18:12:25 +02003141 u64 start = 0;
Miao Xie99740902013-07-25 19:22:36 +08003142 u64 end = 0;
3143 u64 page_start;
3144 int index;
Stefan Behrens35a36212013-08-14 18:12:25 +02003145 int first_index = 0;
Miao Xie99740902013-07-25 19:22:36 +08003146
3147 for (index = 0; index < nr_pages; index++) {
3148 page_start = page_offset(pages[index]);
3149 if (!end) {
3150 start = page_start;
3151 end = start + PAGE_CACHE_SIZE - 1;
3152 first_index = index;
3153 } else if (end + 1 == page_start) {
3154 end += PAGE_CACHE_SIZE;
3155 } else {
3156 __do_contiguous_readpages(tree, &pages[first_index],
3157 index - first_index, start,
Miao Xie125bac012013-07-25 19:22:37 +08003158 end, get_extent, em_cached,
3159 bio, mirror_num, bio_flags,
Filipe Manana808f80b2015-09-28 09:56:26 +01003160 rw, prev_em_start);
Miao Xie99740902013-07-25 19:22:36 +08003161 start = page_start;
3162 end = start + PAGE_CACHE_SIZE - 1;
3163 first_index = index;
3164 }
3165 }
3166
3167 if (end)
3168 __do_contiguous_readpages(tree, &pages[first_index],
3169 index - first_index, start,
Miao Xie125bac012013-07-25 19:22:37 +08003170 end, get_extent, em_cached, bio,
Filipe Manana808f80b2015-09-28 09:56:26 +01003171 mirror_num, bio_flags, rw,
3172 prev_em_start);
Miao Xie99740902013-07-25 19:22:36 +08003173}
3174
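/*
 * lock the range covered by @page, wait for any ordered extent on it to
 * finish, then read the page via __do_readpage.
 */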
3175static int __extent_read_full_page(struct extent_io_tree *tree,
3176 struct page *page,
3177 get_extent_t *get_extent,
3178 struct bio **bio, int mirror_num,
3179 unsigned long *bio_flags, int rw)
3180{
3181 struct inode *inode = page->mapping->host;
3182 struct btrfs_ordered_extent *ordered;
3183 u64 start = page_offset(page);
3184 u64 end = start + PAGE_CACHE_SIZE - 1;
3185 int ret;
3186
3187 while (1) {
3188 lock_extent(tree, start, end);
3189 ordered = btrfs_lookup_ordered_extent(inode, start);
3190 if (!ordered)
3191 break;
3192 unlock_extent(tree, start, end);
3193 btrfs_start_ordered_extent(inode, ordered, 1);
3194 btrfs_put_ordered_extent(ordered);
3195 }
3196
Miao Xie125bac012013-07-25 19:22:37 +08003197 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
Filipe Manana005efed2015-09-14 09:09:31 +01003198 bio_flags, rw, NULL);
Miao Xie99740902013-07-25 19:22:36 +08003199 return ret;
3200}
3201
Chris Masond1310b22008-01-24 16:13:08 -05003202int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003203 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05003204{
3205 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04003206 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003207 int ret;
3208
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003209 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
Josef Bacikd4c7ca82013-04-19 19:49:09 -04003210 &bio_flags, READ);
Chris Masond1310b22008-01-24 16:13:08 -05003211 if (bio)
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02003212 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003213 return ret;
3214}
Chris Masond1310b22008-01-24 16:13:08 -05003215
Mark Fasheh4b384312013-08-06 11:42:50 -07003216int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3217 get_extent_t *get_extent, int mirror_num)
3218{
3219 struct bio *bio = NULL;
3220 unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3221 int ret;
3222
3223 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
Filipe Manana005efed2015-09-14 09:09:31 +01003224 &bio_flags, READ, NULL);
Mark Fasheh4b384312013-08-06 11:42:50 -07003225 if (bio)
3226 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3227 return ret;
3228}
3229
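/*
 * charge @nr_written pages against wbc->nr_to_write and, for cyclic or
 * whole-file writeback, advance the mapping's writeback_index past the
 * pages we just handled.
 */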
Chris Mason11c83492009-04-20 15:50:09 -04003230static noinline void update_nr_written(struct page *page,
3231 struct writeback_control *wbc,
3232 unsigned long nr_written)
3233{
3234 wbc->nr_to_write -= nr_written;
3235 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3236 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3237 page->mapping->writeback_index = page->index + nr_written;
3238}
3239
Chris Masond1310b22008-01-24 16:13:08 -05003240/*
Chris Mason40f76582014-05-21 13:35:51 -07003241 * helper for __extent_writepage, doing all of the delayed allocation setup.
3242 *
3243 * This returns 1 if our fill_delalloc function did all the work required
3244 * to write the page (copy into inline extent). In this case the IO has
3245 * been started and the page is already unlocked.
3246 *
3247 * This returns 0 if all went well (page still locked)
3248 * This returns < 0 if there were errors (page still locked)
Chris Masond1310b22008-01-24 16:13:08 -05003249 */
Chris Mason40f76582014-05-21 13:35:51 -07003250static noinline_for_stack int writepage_delalloc(struct inode *inode,
3251 struct page *page, struct writeback_control *wbc,
3252 struct extent_page_data *epd,
3253 u64 delalloc_start,
3254 unsigned long *nr_written)
Chris Masond1310b22008-01-24 16:13:08 -05003255{
Chris Mason40f76582014-05-21 13:35:51 -07003256 struct extent_io_tree *tree = epd->tree;
3257 u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
3258 u64 nr_delalloc;
3259 u64 delalloc_to_write = 0;
3260 u64 delalloc_end = 0;
3261 int ret;
3262 int page_started = 0;
3263
3264 if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
3265 return 0;
3266
3267 while (delalloc_end < page_end) {
3268 nr_delalloc = find_lock_delalloc_range(inode, tree,
3269 page,
3270 &delalloc_start,
3271 &delalloc_end,
Josef Bacikdcab6a32015-02-11 15:08:59 -05003272 BTRFS_MAX_EXTENT_SIZE);
Chris Mason40f76582014-05-21 13:35:51 -07003273 if (nr_delalloc == 0) {
3274 delalloc_start = delalloc_end + 1;
3275 continue;
3276 }
3277 ret = tree->ops->fill_delalloc(inode, page,
3278 delalloc_start,
3279 delalloc_end,
3280 &page_started,
3281 nr_written);
3282 /* File system has been set read-only */
3283 if (ret) {
3284 SetPageError(page);
3285 /* fill_delalloc should return < 0 for error
3286 * but just in case, we use > 0 here meaning the
3287 * IO is started, so we don't want to return > 0
3288 * unless things are going well.
3289 */
3290 ret = ret < 0 ? ret : -EIO;
3291 goto done;
3292 }
3293 /*
3294 * delalloc_end is already one less than the total
3295 * length, so we don't subtract one from
3296 * PAGE_CACHE_SIZE
3297 */
3298 delalloc_to_write += (delalloc_end - delalloc_start +
3299 PAGE_CACHE_SIZE) >>
3300 PAGE_CACHE_SHIFT;
3301 delalloc_start = delalloc_end + 1;
3302 }
3303 if (wbc->nr_to_write < delalloc_to_write) {
3304 int thresh = 8192;
3305
3306 if (delalloc_to_write < thresh * 2)
3307 thresh = delalloc_to_write;
3308 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3309 thresh);
3310 }
3311
3312 /* did the fill delalloc function already unlock and start
3313 * the IO?
3314 */
3315 if (page_started) {
3316 /*
3317 * we've unlocked the page, so we can't update
3318 * the mapping's writeback index, just update
3319 * nr_to_write.
3320 */
3321 wbc->nr_to_write -= *nr_written;
3322 return 1;
3323 }
3324
3325 ret = 0;
3326
3327done:
3328 return ret;
3329}
3330
3331/*
3332 * helper for __extent_writepage. This calls the writepage start hooks,
3333 * and does the loop to map the page into extents and bios.
3334 *
3335 * We return 1 if the IO is started and the page is unlocked,
3336 * 0 if all went well (page still locked)
3337 * < 0 if there were errors (page still locked)
3338 */
3339static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3340 struct page *page,
3341 struct writeback_control *wbc,
3342 struct extent_page_data *epd,
3343 loff_t i_size,
3344 unsigned long nr_written,
3345 int write_flags, int *nr_ret)
3346{
Chris Masond1310b22008-01-24 16:13:08 -05003347 struct extent_io_tree *tree = epd->tree;
Miao Xie4eee4fa2012-12-21 09:17:45 +00003348 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05003349 u64 page_end = start + PAGE_CACHE_SIZE - 1;
3350 u64 end;
3351 u64 cur = start;
3352 u64 extent_offset;
Chris Masond1310b22008-01-24 16:13:08 -05003353 u64 block_start;
3354 u64 iosize;
3355 sector_t sector;
Chris Mason2c64c532009-09-02 15:04:12 -04003356 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003357 struct extent_map *em;
3358 struct block_device *bdev;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003359 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003360 size_t blocksize;
Chris Mason40f76582014-05-21 13:35:51 -07003361 int ret = 0;
3362 int nr = 0;
3363 bool compressed;
Chris Masond1310b22008-01-24 16:13:08 -05003364
Chris Mason247e7432008-07-17 12:53:51 -04003365 if (tree->ops && tree->ops->writepage_start_hook) {
Chris Masonc8b97812008-10-29 14:49:59 -04003366 ret = tree->ops->writepage_start_hook(page, start,
3367 page_end);
Jeff Mahoney87826df2012-02-15 16:23:57 +01003368 if (ret) {
3369 /* Fixup worker will requeue */
3370 if (ret == -EBUSY)
3371 wbc->pages_skipped++;
3372 else
3373 redirty_page_for_writepage(wbc, page);
Chris Mason40f76582014-05-21 13:35:51 -07003374
Chris Mason11c83492009-04-20 15:50:09 -04003375 update_nr_written(page, wbc, nr_written);
Chris Mason247e7432008-07-17 12:53:51 -04003376 unlock_page(page);
Chris Mason40f76582014-05-21 13:35:51 -07003377 ret = 1;
Chris Mason11c83492009-04-20 15:50:09 -04003378 goto done_unlocked;
Chris Mason247e7432008-07-17 12:53:51 -04003379 }
3380 }
3381
Chris Mason11c83492009-04-20 15:50:09 -04003382 /*
3383 * we don't want to touch the inode after unlocking the page,
3384 * so we update the mapping writeback index now
3385 */
3386 update_nr_written(page, wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05003387
Chris Masond1310b22008-01-24 16:13:08 -05003388 end = page_end;
Chris Mason40f76582014-05-21 13:35:51 -07003389 if (i_size <= start) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04003390 if (tree->ops && tree->ops->writepage_end_io_hook)
3391 tree->ops->writepage_end_io_hook(page, start,
3392 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003393 goto done;
3394 }
3395
Chris Masond1310b22008-01-24 16:13:08 -05003396 blocksize = inode->i_sb->s_blocksize;
3397
3398 while (cur <= end) {
Chris Mason40f76582014-05-21 13:35:51 -07003399 u64 em_end;
3400 if (cur >= i_size) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04003401 if (tree->ops && tree->ops->writepage_end_io_hook)
3402 tree->ops->writepage_end_io_hook(page, cur,
3403 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05003404 break;
3405 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003406 em = epd->get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05003407 end - cur + 1, 1);
David Sterbac7040052011-04-19 18:00:01 +02003408 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05003409 SetPageError(page);
Filipe Manana61391d52014-05-09 17:17:40 +01003410 ret = PTR_ERR_OR_ZERO(em);
Chris Masond1310b22008-01-24 16:13:08 -05003411 break;
3412 }
3413
3414 extent_offset = cur - em->start;
Chris Mason40f76582014-05-21 13:35:51 -07003415 em_end = extent_map_end(em);
3416 BUG_ON(em_end <= cur);
Chris Masond1310b22008-01-24 16:13:08 -05003417 BUG_ON(end < cur);
Chris Mason40f76582014-05-21 13:35:51 -07003418 iosize = min(em_end - cur, end - cur + 1);
Qu Wenruofda28322013-02-26 08:10:22 +00003419 iosize = ALIGN(iosize, blocksize);
Chris Masond1310b22008-01-24 16:13:08 -05003420 sector = (em->block_start + extent_offset) >> 9;
3421 bdev = em->bdev;
3422 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04003423 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05003424 free_extent_map(em);
3425 em = NULL;
3426
Chris Masonc8b97812008-10-29 14:49:59 -04003427 /*
3428 * compressed and inline extents are written through other
3429 * paths in the FS
3430 */
3431 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05003432 block_start == EXTENT_MAP_INLINE) {
Chris Masonc8b97812008-10-29 14:49:59 -04003433 /*
3434 * end_io notification does not happen here for
3435 * compressed extents
3436 */
3437 if (!compressed && tree->ops &&
3438 tree->ops->writepage_end_io_hook)
Chris Masone6dcd2d2008-07-17 12:53:50 -04003439 tree->ops->writepage_end_io_hook(page, cur,
3440 cur + iosize - 1,
3441 NULL, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04003442 else if (compressed) {
3443 /* we don't want to end_page_writeback on
3444 * a compressed extent. this happens
3445 * elsewhere
3446 */
3447 nr++;
3448 }
3449
3450 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003451 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003452 continue;
3453 }
Chris Masonc8b97812008-10-29 14:49:59 -04003454
Chris Masond1310b22008-01-24 16:13:08 -05003455 if (tree->ops && tree->ops->writepage_io_hook) {
3456 ret = tree->ops->writepage_io_hook(page, cur,
3457 cur + iosize - 1);
3458 } else {
3459 ret = 0;
3460 }
Chris Mason1259ab72008-05-12 13:39:03 -04003461 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05003462 SetPageError(page);
Chris Mason1259ab72008-05-12 13:39:03 -04003463 } else {
Chris Mason40f76582014-05-21 13:35:51 -07003464 unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003465
Chris Masond1310b22008-01-24 16:13:08 -05003466 set_range_writeback(tree, cur, cur + iosize - 1);
3467 if (!PageWriteback(page)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003468 btrfs_err(BTRFS_I(inode)->root->fs_info,
3469 "page %lu not writeback, cur %llu end %llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003470 page->index, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05003471 }
3472
Chris Masonda2f0f72015-07-02 13:57:22 -07003473 ret = submit_extent_page(write_flags, tree, wbc, page,
Chris Masonffbd5172009-04-20 15:50:09 -04003474 sector, iosize, pg_offset,
3475 bdev, &epd->bio, max_nr,
Chris Masonc8b97812008-10-29 14:49:59 -04003476 end_bio_extent_writepage,
Filipe Manana005efed2015-09-14 09:09:31 +01003477 0, 0, 0, false);
Chris Masond1310b22008-01-24 16:13:08 -05003478 if (ret)
3479 SetPageError(page);
3480 }
3481 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003482 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003483 nr++;
3484 }
3485done:
Chris Mason40f76582014-05-21 13:35:51 -07003486 *nr_ret = nr;
Chris Mason771ed682008-11-06 22:02:51 -05003487
Chris Mason11c83492009-04-20 15:50:09 -04003488done_unlocked:
3489
Chris Mason2c64c532009-09-02 15:04:12 -04003490 /* drop our reference on any cached states */
3491 free_extent_state(cached_state);
Chris Mason40f76582014-05-21 13:35:51 -07003492 return ret;
3493}
3494
3495/*
3496 * the writepage semantics are similar to regular writepage. extent
3497 * records are inserted to lock ranges in the tree, and as dirty areas
3498 * are found, they are marked writeback. Then the lock bits are removed
3499 * and the end_io handler clears the writeback ranges
3500 */
3501static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3502 void *data)
3503{
3504 struct inode *inode = page->mapping->host;
3505 struct extent_page_data *epd = data;
3506 u64 start = page_offset(page);
3507 u64 page_end = start + PAGE_CACHE_SIZE - 1;
3508 int ret;
3509 int nr = 0;
3510 size_t pg_offset = 0;
3511 loff_t i_size = i_size_read(inode);
3512 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
3513 int write_flags;
3514 unsigned long nr_written = 0;
3515
3516 if (wbc->sync_mode == WB_SYNC_ALL)
3517 write_flags = WRITE_SYNC;
3518 else
3519 write_flags = WRITE;
3520
3521 trace___extent_writepage(page, inode, wbc);
3522
3523 WARN_ON(!PageLocked(page));
3524
3525 ClearPageError(page);
3526
3527 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
3528 if (page->index > end_index ||
3529 (page->index == end_index && !pg_offset)) {
3530 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
3531 unlock_page(page);
3532 return 0;
3533 }
3534
3535 if (page->index == end_index) {
3536 char *userpage;
3537
3538 userpage = kmap_atomic(page);
3539 memset(userpage + pg_offset, 0,
3540 PAGE_CACHE_SIZE - pg_offset);
3541 kunmap_atomic(userpage);
3542 flush_dcache_page(page);
3543 }
3544
3545 pg_offset = 0;
3546
3547 set_page_extent_mapped(page);
3548
3549 ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
3550 if (ret == 1)
3551 goto done_unlocked;
3552 if (ret)
3553 goto done;
3554
3555 ret = __extent_writepage_io(inode, page, wbc, epd,
3556 i_size, nr_written, write_flags, &nr);
3557 if (ret == 1)
3558 goto done_unlocked;
3559
3560done:
Chris Masone6dcd2d2008-07-17 12:53:50 -04003561 if (nr == 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003562 /* make sure the mapping tag for page dirty gets cleared */
Chris Mason771ed682008-11-06 22:02:51 -05003563 set_page_writeback(page);
3564 end_page_writeback(page);
3565 }
Filipe Manana61391d52014-05-09 17:17:40 +01003566 if (PageError(page)) {
3567 ret = ret < 0 ? ret : -EIO;
3568 end_extent_writepage(page, ret, start, page_end);
3569 }
Christoph Hellwigb2950862008-12-02 09:54:17 -05003570 unlock_page(page);
Chris Mason40f76582014-05-21 13:35:51 -07003571 return ret;
Chris Mason4bef0842008-09-08 11:18:08 -04003572
3573done_unlocked:
Chris Masond1310b22008-01-24 16:13:08 -05003574 return 0;
3575}
3576
Josef Bacikfd8b2b62013-04-24 16:41:19 -04003577void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003578{
NeilBrown74316202014-07-07 15:16:04 +10003579 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3580 TASK_UNINTERRUPTIBLE);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003581}
3582
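/*
 * take the tree write lock on @eb (flushing the pending bio if we might
 * block), wait for writeback already in flight when this is a sync write,
 * then move the buffer from dirty to writeback state and lock all its pages.
 * Returns 1 when the buffer must be written out, 0 when there is nothing
 * to do.
 */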
Chris Mason0e378df2014-05-19 20:55:27 -07003583static noinline_for_stack int
3584lock_extent_buffer_for_io(struct extent_buffer *eb,
3585 struct btrfs_fs_info *fs_info,
3586 struct extent_page_data *epd)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003587{
3588 unsigned long i, num_pages;
3589 int flush = 0;
3590 int ret = 0;
3591
3592 if (!btrfs_try_tree_write_lock(eb)) {
3593 flush = 1;
3594 flush_write_bio(epd);
3595 btrfs_tree_lock(eb);
3596 }
3597
3598 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3599 btrfs_tree_unlock(eb);
3600 if (!epd->sync_io)
3601 return 0;
3602 if (!flush) {
3603 flush_write_bio(epd);
3604 flush = 1;
3605 }
Chris Masona098d8e2012-03-21 12:09:56 -04003606 while (1) {
3607 wait_on_extent_buffer_writeback(eb);
3608 btrfs_tree_lock(eb);
3609 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3610 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003611 btrfs_tree_unlock(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003612 }
3613 }
3614
Josef Bacik51561ff2012-07-20 16:25:24 -04003615 /*
3616 * We need to do this to prevent races with anyone who checks if the eb is
3617 * under IO since we can end up having no IO bits set for a short period
3618 * of time.
3619 */
3620 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003621 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3622 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Josef Bacik51561ff2012-07-20 16:25:24 -04003623 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003624 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
Miao Xiee2d84522013-01-29 10:09:20 +00003625 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3626 -eb->len,
3627 fs_info->dirty_metadata_batch);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003628 ret = 1;
Josef Bacik51561ff2012-07-20 16:25:24 -04003629 } else {
3630 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003631 }
3632
3633 btrfs_tree_unlock(eb);
3634
3635 if (!ret)
3636 return ret;
3637
3638 num_pages = num_extent_pages(eb->start, eb->len);
3639 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02003640 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003641
3642 if (!trylock_page(p)) {
3643 if (!flush) {
3644 flush_write_bio(epd);
3645 flush = 1;
3646 }
3647 lock_page(p);
3648 }
3649 }
3650
3651 return ret;
3652}
3653
3654static void end_extent_buffer_writeback(struct extent_buffer *eb)
3655{
3656 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003657 smp_mb__after_atomic();
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003658 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3659}
3660
Filipe Manana656f30d2014-09-26 12:25:56 +01003661static void set_btree_ioerr(struct page *page)
3662{
3663 struct extent_buffer *eb = (struct extent_buffer *)page->private;
3664 struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
3665
3666 SetPageError(page);
3667 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3668 return;
3669
3670 /*
3671 * If writeback for a btree extent that doesn't belong to a log tree
3672 * failed, increment the counter transaction->eb_write_errors.
3673 * We do this because while the transaction is running and before it's
3674 * committing (when we call filemap_fdata[write|wait]_range against
3675 * the btree inode), we might have
3676 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3677 * returns an error or an error happens during writeback, when we're
3678 * committing the transaction we wouldn't know about it, since the pages
3679 * may no longer be dirty nor marked for writeback (if a
3680 * subsequent modification to the extent buffer didn't happen before the
3681 * transaction commit), which makes filemap_fdata[write|wait]_range not
3682 * able to find the pages tagged with SetPageError at transaction
3683 * commit time. So if this happens we must abort the transaction,
3684 * otherwise we commit a super block with btree roots that point to
3685 * btree nodes/leafs whose content on disk is invalid - either garbage
3686 * or the content of some node/leaf from a past generation that got
3687 * cowed or deleted and is no longer valid.
3688 *
3689 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3690 * not be enough - we need to distinguish between log tree extents vs
3691 * non-log tree extents, and the next filemap_fdatawait_range() call
3692 * will catch and clear such errors in the mapping - and that call might
3693 * be from a log sync and not from a transaction commit. Also, checking
3694 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3695 * not done and would not be reliable - the eb might have been released
3696 * from memory and reading it back again means that flag would not be
3697 * set (since it's a runtime flag, not persisted on disk).
3698 *
3699 * Using the flags below in the btree inode also makes us achieve the
3700 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
3701 * writeback for all dirty pages and before filemap_fdatawait_range()
3702 * is called, the writeback for all dirty pages had already finished
3703 * with errors - because we were not using AS_EIO/AS_ENOSPC,
3704 * filemap_fdatawait_range() would return success, as it could not know
3705 * that writeback errors happened (the pages were no longer tagged for
3706 * writeback).
3707 */
3708 switch (eb->log_index) {
3709 case -1:
3710 set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags);
3711 break;
3712 case 0:
3713 set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
3714 break;
3715 case 1:
3716 set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
3717 break;
3718 default:
3719 BUG(); /* unexpected, logic error */
3720 }
3721}
3722
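/*
 * completion handler for metadata writeback bios: on error, flag the page
 * and its extent buffer via set_btree_ioerr, end writeback on each page,
 * and once the last page of a buffer completes, clear the buffer's
 * writeback bit and wake up any waiters.
 */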
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003723static void end_bio_extent_buffer_writepage(struct bio *bio)
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003724{
Kent Overstreet2c30c712013-11-07 12:20:26 -08003725 struct bio_vec *bvec;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003726 struct extent_buffer *eb;
Kent Overstreet2c30c712013-11-07 12:20:26 -08003727 int i, done;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003728
Kent Overstreet2c30c712013-11-07 12:20:26 -08003729 bio_for_each_segment_all(bvec, bio, i) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003730 struct page *page = bvec->bv_page;
3731
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003732 eb = (struct extent_buffer *)page->private;
3733 BUG_ON(!eb);
3734 done = atomic_dec_and_test(&eb->io_pages);
3735
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003736 if (bio->bi_error ||
3737 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003738 ClearPageUptodate(page);
Filipe Manana656f30d2014-09-26 12:25:56 +01003739 set_btree_ioerr(page);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003740 }
3741
3742 end_page_writeback(page);
3743
3744 if (!done)
3745 continue;
3746
3747 end_extent_buffer_writeback(eb);
Kent Overstreet2c30c712013-11-07 12:20:26 -08003748 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003749
3750 bio_put(bio);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003751}
3752
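/*
 * write out every page of a single extent buffer: clear the dirty bit,
 * mark the page writeback and submit it via submit_extent_page.  On a
 * submission failure the error is recorded with set_btree_ioerr, the
 * remaining (unsubmitted) pages are cleaned up and -EIO is returned.
 */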
Chris Mason0e378df2014-05-19 20:55:27 -07003753static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003754 struct btrfs_fs_info *fs_info,
3755 struct writeback_control *wbc,
3756 struct extent_page_data *epd)
3757{
3758 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
Josef Bacikf28491e2013-12-16 13:24:27 -05003759 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003760 u64 offset = eb->start;
3761 unsigned long i, num_pages;
Josef Bacikde0022b2012-09-25 14:25:58 -04003762 unsigned long bio_flags = 0;
Josef Bacikd4c7ca82013-04-19 19:49:09 -04003763 int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
Josef Bacikd7dbe9e2012-04-23 14:00:51 -04003764 int ret = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003765
Filipe Manana656f30d2014-09-26 12:25:56 +01003766 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003767 num_pages = num_extent_pages(eb->start, eb->len);
3768 atomic_set(&eb->io_pages, num_pages);
Josef Bacikde0022b2012-09-25 14:25:58 -04003769 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3770 bio_flags = EXTENT_BIO_TREE_LOG;
3771
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003772 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02003773 struct page *p = eb->pages[i];
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003774
3775 clear_page_dirty_for_io(p);
3776 set_page_writeback(p);
Chris Masonda2f0f72015-07-02 13:57:22 -07003777 ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003778 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3779 -1, end_bio_extent_buffer_writepage,
Filipe Manana005efed2015-09-14 09:09:31 +01003780 0, epd->bio_flags, bio_flags, false);
Josef Bacikde0022b2012-09-25 14:25:58 -04003781 epd->bio_flags = bio_flags;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003782 if (ret) {
Filipe Manana656f30d2014-09-26 12:25:56 +01003783 set_btree_ioerr(p);
Filipe Manana55e3bd22014-09-22 17:41:04 +01003784 end_page_writeback(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003785 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3786 end_extent_buffer_writeback(eb);
3787 ret = -EIO;
3788 break;
3789 }
3790 offset += PAGE_CACHE_SIZE;
3791 update_nr_written(p, wbc, 1);
3792 unlock_page(p);
3793 }
3794
3795 if (unlikely(ret)) {
3796 for (; i < num_pages; i++) {
Chris Masonbbf65cf2014-10-04 09:56:45 -07003797 struct page *p = eb->pages[i];
Liu Bo81465022014-09-23 22:22:33 +08003798 clear_page_dirty_for_io(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003799 unlock_page(p);
3800 }
3801 }
3802
3803 return ret;
3804}
3805
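/*
 * walk the dirty pages of the btree inode, resolve each one to its extent
 * buffer under mapping->private_lock, and write every buffer exactly once
 * via write_one_eb, honoring the range, sync mode and nr_to_write budget
 * in @wbc.
 */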
3806int btree_write_cache_pages(struct address_space *mapping,
3807 struct writeback_control *wbc)
3808{
3809 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3810 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3811 struct extent_buffer *eb, *prev_eb = NULL;
3812 struct extent_page_data epd = {
3813 .bio = NULL,
3814 .tree = tree,
3815 .extent_locked = 0,
3816 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Josef Bacikde0022b2012-09-25 14:25:58 -04003817 .bio_flags = 0,
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003818 };
3819 int ret = 0;
3820 int done = 0;
3821 int nr_to_write_done = 0;
3822 struct pagevec pvec;
3823 int nr_pages;
3824 pgoff_t index;
3825 pgoff_t end; /* Inclusive */
3826 int scanned = 0;
3827 int tag;
3828
3829 pagevec_init(&pvec, 0);
3830 if (wbc->range_cyclic) {
3831 index = mapping->writeback_index; /* Start from prev offset */
3832 end = -1;
3833 } else {
3834 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3835 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3836 scanned = 1;
3837 }
3838 if (wbc->sync_mode == WB_SYNC_ALL)
3839 tag = PAGECACHE_TAG_TOWRITE;
3840 else
3841 tag = PAGECACHE_TAG_DIRTY;
3842retry:
3843 if (wbc->sync_mode == WB_SYNC_ALL)
3844 tag_pages_for_writeback(mapping, index, end);
3845 while (!done && !nr_to_write_done && (index <= end) &&
3846 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3847 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3848 unsigned i;
3849
3850 scanned = 1;
3851 for (i = 0; i < nr_pages; i++) {
3852 struct page *page = pvec.pages[i];
3853
3854 if (!PagePrivate(page))
3855 continue;
3856
3857 if (!wbc->range_cyclic && page->index > end) {
3858 done = 1;
3859 break;
3860 }
3861
Josef Bacikb5bae262012-09-14 13:43:01 -04003862 spin_lock(&mapping->private_lock);
3863 if (!PagePrivate(page)) {
3864 spin_unlock(&mapping->private_lock);
3865 continue;
3866 }
3867
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003868 eb = (struct extent_buffer *)page->private;
Josef Bacikb5bae262012-09-14 13:43:01 -04003869
3870 /*
3871 * Shouldn't happen and normally this would be a BUG_ON
3872 * but no sense in crashing the user's box for something
3873 * we can survive anyway.
3874 */
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05303875 if (WARN_ON(!eb)) {
Josef Bacikb5bae262012-09-14 13:43:01 -04003876 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003877 continue;
3878 }
3879
Josef Bacikb5bae262012-09-14 13:43:01 -04003880 if (eb == prev_eb) {
3881 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003882 continue;
3883 }
3884
Josef Bacikb5bae262012-09-14 13:43:01 -04003885 ret = atomic_inc_not_zero(&eb->refs);
3886 spin_unlock(&mapping->private_lock);
3887 if (!ret)
3888 continue;
3889
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003890 prev_eb = eb;
3891 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3892 if (!ret) {
3893 free_extent_buffer(eb);
3894 continue;
3895 }
3896
3897 ret = write_one_eb(eb, fs_info, wbc, &epd);
3898 if (ret) {
3899 done = 1;
3900 free_extent_buffer(eb);
3901 break;
3902 }
3903 free_extent_buffer(eb);
3904
3905 /*
3906 * the filesystem may choose to bump up nr_to_write.
3907 * We have to make sure to honor the new nr_to_write
3908 * at any time
3909 */
3910 nr_to_write_done = wbc->nr_to_write <= 0;
3911 }
3912 pagevec_release(&pvec);
3913 cond_resched();
3914 }
3915 if (!scanned && !done) {
3916 /*
3917 * We hit the last page and there is more work to be done: wrap
3918 * back to the start of the file
3919 */
3920 scanned = 1;
3921 index = 0;
3922 goto retry;
3923 }
3924 flush_write_bio(&epd);
3925 return ret;
3926}
3927
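/*
 * Illustrative sketch (names hypothetical, a minimal example only): how a
 * btree ->writepages address_space operation might hand off to
 * btree_write_cache_pages() above.  In btrfs the real caller is
 * btree_writepages() in disk-io.c, which also skips background
 * (WB_SYNC_NONE) writeback when there is little dirty metadata.
 */
#if 0	/* illustrative only, not compiled */
static int example_btree_writepages(struct address_space *mapping,
				    struct writeback_control *wbc)
{
	/* walk every dirty/tagged btree page and submit metadata writes */
	return btree_write_cache_pages(mapping, wbc);
}
#endif
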
Chris Masond1310b22008-01-24 16:13:08 -05003928/**
3929 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3930 * @mapping: address space structure to write
3931 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3932 * @writepage: function called for each page
3933 * @data: data passed to writepage function
3934 *
3935 * If a page is already under I/O, write_cache_pages() skips it, even
3936 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3937 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3938 * and msync() need to guarantee that all the data which was dirty at the time
3939 * the call was made gets new I/O started against it. If wbc->sync_mode is
3940 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3941 * existing IO to complete.
3942 */
Chris Mason4bef0842008-09-08 11:18:08 -04003943static int extent_write_cache_pages(struct extent_io_tree *tree,
3944 struct address_space *mapping,
3945 struct writeback_control *wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05003946 writepage_t writepage, void *data,
3947 void (*flush_fn)(void *))
Chris Masond1310b22008-01-24 16:13:08 -05003948{
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003949 struct inode *inode = mapping->host;
Chris Masond1310b22008-01-24 16:13:08 -05003950 int ret = 0;
3951 int done = 0;
Filipe Manana61391d52014-05-09 17:17:40 +01003952 int err = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003953 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003954 struct pagevec pvec;
3955 int nr_pages;
3956 pgoff_t index;
3957 pgoff_t end; /* Inclusive */
3958 int scanned = 0;
Josef Bacikf7aaa062011-07-15 21:26:38 +00003959 int tag;
Chris Masond1310b22008-01-24 16:13:08 -05003960
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003961 /*
3962 * We have to hold onto the inode so that ordered extents can do their
3963 * work when the IO finishes. The alternative to this is failing to add
3964 * an ordered extent if the igrab() fails there and that is a huge pain
3965 * to deal with, so instead just hold onto the inode throughout the
3966 * writepages operation. If it fails here we are freeing up the inode
3967 * anyway and we'd rather not waste our time writing out stuff that is
3968 * going to be truncated anyway.
3969 */
3970 if (!igrab(inode))
3971 return 0;
3972
Chris Masond1310b22008-01-24 16:13:08 -05003973 pagevec_init(&pvec, 0);
3974 if (wbc->range_cyclic) {
3975 index = mapping->writeback_index; /* Start from prev offset */
3976 end = -1;
3977 } else {
3978 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3979 end = wbc->range_end >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05003980 scanned = 1;
3981 }
Josef Bacikf7aaa062011-07-15 21:26:38 +00003982 if (wbc->sync_mode == WB_SYNC_ALL)
3983 tag = PAGECACHE_TAG_TOWRITE;
3984 else
3985 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05003986retry:
Josef Bacikf7aaa062011-07-15 21:26:38 +00003987 if (wbc->sync_mode == WB_SYNC_ALL)
3988 tag_pages_for_writeback(mapping, index, end);
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003989 while (!done && !nr_to_write_done && (index <= end) &&
Josef Bacikf7aaa062011-07-15 21:26:38 +00003990 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3991 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
Chris Masond1310b22008-01-24 16:13:08 -05003992 unsigned i;
3993
3994 scanned = 1;
3995 for (i = 0; i < nr_pages; i++) {
3996 struct page *page = pvec.pages[i];
3997
3998 /*
3999 * At this point we hold neither mapping->tree_lock nor
4000 * lock on the page itself: the page may be truncated or
4001 * invalidated (changing page->mapping to NULL), or even
4002 * swizzled back from swapper_space to tmpfs file
4003 * mapping
4004 */
Josef Bacikc8f2f242013-02-11 11:33:00 -05004005 if (!trylock_page(page)) {
4006 flush_fn(data);
4007 lock_page(page);
Chris Mason01d658f2011-11-01 10:08:06 -04004008 }
Chris Masond1310b22008-01-24 16:13:08 -05004009
4010 if (unlikely(page->mapping != mapping)) {
4011 unlock_page(page);
4012 continue;
4013 }
4014
4015 if (!wbc->range_cyclic && page->index > end) {
4016 done = 1;
4017 unlock_page(page);
4018 continue;
4019 }
4020
Chris Masond2c3f4f2008-11-19 12:44:22 -05004021 if (wbc->sync_mode != WB_SYNC_NONE) {
Chris Mason0e6bd952008-11-20 10:46:35 -05004022 if (PageWriteback(page))
4023 flush_fn(data);
Chris Masond1310b22008-01-24 16:13:08 -05004024 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05004025 }
Chris Masond1310b22008-01-24 16:13:08 -05004026
4027 if (PageWriteback(page) ||
4028 !clear_page_dirty_for_io(page)) {
4029 unlock_page(page);
4030 continue;
4031 }
4032
4033 ret = (*writepage)(page, wbc, data);
4034
4035 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
4036 unlock_page(page);
4037 ret = 0;
4038 }
Filipe Manana61391d52014-05-09 17:17:40 +01004039 if (!err && ret < 0)
4040 err = ret;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04004041
4042 /*
4043 * the filesystem may choose to bump up nr_to_write.
4044 * We have to make sure to honor the new nr_to_write
4045 * at any time
4046 */
4047 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05004048 }
4049 pagevec_release(&pvec);
4050 cond_resched();
4051 }
Filipe Manana61391d52014-05-09 17:17:40 +01004052 if (!scanned && !done && !err) {
Chris Masond1310b22008-01-24 16:13:08 -05004053 /*
4054 * We hit the last page and there is more work to be done: wrap
4055 * back to the start of the file
4056 */
4057 scanned = 1;
4058 index = 0;
4059 goto retry;
4060 }
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04004061 btrfs_add_delayed_iput(inode);
Filipe Manana61391d52014-05-09 17:17:40 +01004062 return err;
Chris Masond1310b22008-01-24 16:13:08 -05004063}
Chris Masond1310b22008-01-24 16:13:08 -05004064
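/*
 * Illustrative sketch of driving extent_write_cache_pages() for a ranged,
 * data-integrity flush.  This mirrors what extent_write_full_page() and
 * extent_writepages() below do; the wrapper name is hypothetical and the
 * field values are a minimal example, not the btrfs defaults.
 */
#if 0	/* illustrative only, not compiled */
static int example_write_range_sync(struct extent_io_tree *tree,
				    struct address_space *mapping,
				    get_extent_t *get_extent,
				    u64 start, u64 end)
{
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
		.extent_locked = 0,
		.sync_io = 1,		/* WB_SYNC_ALL: submit WRITE_SYNC bios */
		.bio_flags = 0,
	};
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,	/* selects PAGECACHE_TAG_TOWRITE */
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};
	int ret;

	ret = extent_write_cache_pages(tree, mapping, &wbc,
				       __extent_writepage, &epd,
				       flush_write_bio);
	flush_epd_write_bio(&epd);
	return ret;
}
#endif
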
Chris Masonffbd5172009-04-20 15:50:09 -04004065static void flush_epd_write_bio(struct extent_page_data *epd)
4066{
4067 if (epd->bio) {
Jeff Mahoney355808c2011-10-03 23:23:14 -04004068 int rw = WRITE;
4069 int ret;
4070
Chris Masonffbd5172009-04-20 15:50:09 -04004071 if (epd->sync_io)
Jeff Mahoney355808c2011-10-03 23:23:14 -04004072 rw = WRITE_SYNC;
4073
Josef Bacikde0022b2012-09-25 14:25:58 -04004074 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01004075 BUG_ON(ret < 0); /* -ENOMEM */
Chris Masonffbd5172009-04-20 15:50:09 -04004076 epd->bio = NULL;
4077 }
4078}
4079
Chris Masond2c3f4f2008-11-19 12:44:22 -05004080static noinline void flush_write_bio(void *data)
4081{
4082 struct extent_page_data *epd = data;
Chris Masonffbd5172009-04-20 15:50:09 -04004083 flush_epd_write_bio(epd);
Chris Masond2c3f4f2008-11-19 12:44:22 -05004084}
4085
Chris Masond1310b22008-01-24 16:13:08 -05004086int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
4087 get_extent_t *get_extent,
4088 struct writeback_control *wbc)
4089{
4090 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05004091 struct extent_page_data epd = {
4092 .bio = NULL,
4093 .tree = tree,
4094 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05004095 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004096 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Josef Bacikde0022b2012-09-25 14:25:58 -04004097 .bio_flags = 0,
Chris Masond1310b22008-01-24 16:13:08 -05004098 };
Chris Masond1310b22008-01-24 16:13:08 -05004099
Chris Masond1310b22008-01-24 16:13:08 -05004100 ret = __extent_writepage(page, wbc, &epd);
4101
Chris Masonffbd5172009-04-20 15:50:09 -04004102 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05004103 return ret;
4104}
Chris Masond1310b22008-01-24 16:13:08 -05004105
Chris Mason771ed682008-11-06 22:02:51 -05004106int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
4107 u64 start, u64 end, get_extent_t *get_extent,
4108 int mode)
4109{
4110 int ret = 0;
4111 struct address_space *mapping = inode->i_mapping;
4112 struct page *page;
4113 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
4114 PAGE_CACHE_SHIFT;
4115
4116 struct extent_page_data epd = {
4117 .bio = NULL,
4118 .tree = tree,
4119 .get_extent = get_extent,
4120 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04004121 .sync_io = mode == WB_SYNC_ALL,
Josef Bacikde0022b2012-09-25 14:25:58 -04004122 .bio_flags = 0,
Chris Mason771ed682008-11-06 22:02:51 -05004123 };
4124 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05004125 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05004126 .nr_to_write = nr_pages * 2,
4127 .range_start = start,
4128 .range_end = end + 1,
4129 };
4130
Chris Masond3977122009-01-05 21:25:51 -05004131 while (start <= end) {
Chris Mason771ed682008-11-06 22:02:51 -05004132 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
4133 if (clear_page_dirty_for_io(page))
4134 ret = __extent_writepage(page, &wbc_writepages, &epd);
4135 else {
4136 if (tree->ops && tree->ops->writepage_end_io_hook)
4137 tree->ops->writepage_end_io_hook(page, start,
4138 start + PAGE_CACHE_SIZE - 1,
4139 NULL, 1);
4140 unlock_page(page);
4141 }
4142 page_cache_release(page);
4143 start += PAGE_CACHE_SIZE;
4144 }
4145
Chris Masonffbd5172009-04-20 15:50:09 -04004146 flush_epd_write_bio(&epd);
Chris Mason771ed682008-11-06 22:02:51 -05004147 return ret;
4148}
Chris Masond1310b22008-01-24 16:13:08 -05004149
4150int extent_writepages(struct extent_io_tree *tree,
4151 struct address_space *mapping,
4152 get_extent_t *get_extent,
4153 struct writeback_control *wbc)
4154{
4155 int ret = 0;
4156 struct extent_page_data epd = {
4157 .bio = NULL,
4158 .tree = tree,
4159 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05004160 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04004161 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Josef Bacikde0022b2012-09-25 14:25:58 -04004162 .bio_flags = 0,
Chris Masond1310b22008-01-24 16:13:08 -05004163 };
4164
Chris Mason4bef0842008-09-08 11:18:08 -04004165 ret = extent_write_cache_pages(tree, mapping, wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05004166 __extent_writepage, &epd,
4167 flush_write_bio);
Chris Masonffbd5172009-04-20 15:50:09 -04004168 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05004169 return ret;
4170}
Chris Masond1310b22008-01-24 16:13:08 -05004171
4172int extent_readpages(struct extent_io_tree *tree,
4173 struct address_space *mapping,
4174 struct list_head *pages, unsigned nr_pages,
4175 get_extent_t get_extent)
4176{
4177 struct bio *bio = NULL;
4178 unsigned page_idx;
Chris Masonc8b97812008-10-29 14:49:59 -04004179 unsigned long bio_flags = 0;
Liu Bo67c96842012-07-20 21:43:09 -06004180 struct page *pagepool[16];
4181 struct page *page;
Miao Xie125bac012013-07-25 19:22:37 +08004182 struct extent_map *em_cached = NULL;
Liu Bo67c96842012-07-20 21:43:09 -06004183 int nr = 0;
Filipe Manana808f80b2015-09-28 09:56:26 +01004184 u64 prev_em_start = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05004185
Chris Masond1310b22008-01-24 16:13:08 -05004186 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
Liu Bo67c96842012-07-20 21:43:09 -06004187 page = list_entry(pages->prev, struct page, lru);
Chris Masond1310b22008-01-24 16:13:08 -05004188
4189 prefetchw(&page->flags);
4190 list_del(&page->lru);
Liu Bo67c96842012-07-20 21:43:09 -06004191 if (add_to_page_cache_lru(page, mapping,
Itaru Kitayama43e817a2011-04-25 19:43:51 -04004192 page->index, GFP_NOFS)) {
Liu Bo67c96842012-07-20 21:43:09 -06004193 page_cache_release(page);
4194 continue;
Chris Masond1310b22008-01-24 16:13:08 -05004195 }
Liu Bo67c96842012-07-20 21:43:09 -06004196
4197 pagepool[nr++] = page;
4198 if (nr < ARRAY_SIZE(pagepool))
4199 continue;
Miao Xie125bac012013-07-25 19:22:37 +08004200 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
Filipe Manana808f80b2015-09-28 09:56:26 +01004201 &bio, 0, &bio_flags, READ, &prev_em_start);
Liu Bo67c96842012-07-20 21:43:09 -06004202 nr = 0;
Chris Masond1310b22008-01-24 16:13:08 -05004203 }
Miao Xie99740902013-07-25 19:22:36 +08004204 if (nr)
Miao Xie125bac012013-07-25 19:22:37 +08004205 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
Filipe Manana808f80b2015-09-28 09:56:26 +01004206 &bio, 0, &bio_flags, READ, &prev_em_start);
Liu Bo67c96842012-07-20 21:43:09 -06004207
Miao Xie125bac012013-07-25 19:22:37 +08004208 if (em_cached)
4209 free_extent_map(em_cached);
4210
Chris Masond1310b22008-01-24 16:13:08 -05004211 BUG_ON(!list_empty(pages));
4212 if (bio)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01004213 return submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05004214 return 0;
4215}
Chris Masond1310b22008-01-24 16:13:08 -05004216
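/*
 * Illustrative sketch of a ->readpages address_space operation built on
 * extent_readpages().  In btrfs the real hook is btrfs_readpages() in
 * inode.c; btrfs_get_extent is assumed to be the usual data get_extent
 * callback and the function name below is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;

	/* batches up to 16 pages at a time and submits read bios */
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}
#endif
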
4217/*
4218 * basic invalidatepage code, this waits on any locked or writeback
4219 * ranges corresponding to the page, and then deletes any extent state
4220 * records from the tree
4221 */
4222int extent_invalidatepage(struct extent_io_tree *tree,
4223 struct page *page, unsigned long offset)
4224{
Josef Bacik2ac55d42010-02-03 19:33:23 +00004225 struct extent_state *cached_state = NULL;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004226 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05004227 u64 end = start + PAGE_CACHE_SIZE - 1;
4228 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4229
Qu Wenruofda28322013-02-26 08:10:22 +00004230 start += ALIGN(offset, blocksize);
Chris Masond1310b22008-01-24 16:13:08 -05004231 if (start > end)
4232 return 0;
4233
David Sterbaff13db42015-12-03 14:30:40 +01004234 lock_extent_bits(tree, start, end, &cached_state);
Chris Mason1edbb732009-09-02 13:24:36 -04004235 wait_on_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05004236 clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04004237 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4238 EXTENT_DO_ACCOUNTING,
Josef Bacik2ac55d42010-02-03 19:33:23 +00004239 1, 1, &cached_state, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05004240 return 0;
4241}
Chris Masond1310b22008-01-24 16:13:08 -05004242
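/*
 * Illustrative sketch of a minimal ->invalidatepage hook built on
 * extent_invalidatepage().  The real btrfs hooks (btree_invalidatepage()
 * in disk-io.c and btrfs_invalidatepage() in inode.c) do more work, such
 * as dropping ordered extents; the name and signature below are assumed.
 */
#if 0	/* illustrative only, not compiled */
static void example_invalidatepage(struct page *page, unsigned int offset,
				   unsigned int length)
{
	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;

	/* wait for locked/writeback ranges, then drop extent state records */
	extent_invalidatepage(tree, page, offset);
}
#endif
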
4243/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04004244 * a helper for releasepage, this tests for areas of the page that
4245 * are locked or under IO and drops the related state bits if it is safe
4246 * to drop the page.
4247 */
Eric Sandeen48a3b632013-04-25 20:41:01 +00004248static int try_release_extent_state(struct extent_map_tree *map,
4249 struct extent_io_tree *tree,
4250 struct page *page, gfp_t mask)
Chris Mason7b13b7b2008-04-18 10:29:50 -04004251{
Miao Xie4eee4fa2012-12-21 09:17:45 +00004252 u64 start = page_offset(page);
Chris Mason7b13b7b2008-04-18 10:29:50 -04004253 u64 end = start + PAGE_CACHE_SIZE - 1;
4254 int ret = 1;
4255
Chris Mason211f90e2008-07-18 11:56:15 -04004256 if (test_range_bit(tree, start, end,
Chris Mason8b62b722009-09-02 16:53:46 -04004257 EXTENT_IOBITS, 0, NULL))
Chris Mason7b13b7b2008-04-18 10:29:50 -04004258 ret = 0;
4259 else {
4260 if ((mask & GFP_NOFS) == GFP_NOFS)
4261 mask = GFP_NOFS;
Chris Mason11ef1602009-09-23 20:28:46 -04004262 /*
4263 * at this point we can safely clear everything except the
4264 * locked bit and the nodatasum bit
4265 */
Chris Masone3f24cc2011-02-14 12:52:08 -05004266 ret = clear_extent_bit(tree, start, end,
Chris Mason11ef1602009-09-23 20:28:46 -04004267 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4268 0, 0, NULL, mask);
Chris Masone3f24cc2011-02-14 12:52:08 -05004269
4270 /* if clear_extent_bit failed for enomem reasons,
4271 * we can't allow the release to continue.
4272 */
4273 if (ret < 0)
4274 ret = 0;
4275 else
4276 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004277 }
4278 return ret;
4279}
Chris Mason7b13b7b2008-04-18 10:29:50 -04004280
4281/*
Chris Masond1310b22008-01-24 16:13:08 -05004282 * a helper for releasepage. As long as there are no locked extents
4283 * in the range corresponding to the page, both state records and extent
4284 * map records are removed
4285 */
4286int try_release_extent_mapping(struct extent_map_tree *map,
Chris Mason70dec802008-01-29 09:59:12 -05004287 struct extent_io_tree *tree, struct page *page,
4288 gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05004289{
4290 struct extent_map *em;
Miao Xie4eee4fa2012-12-21 09:17:45 +00004291 u64 start = page_offset(page);
Chris Masond1310b22008-01-24 16:13:08 -05004292 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04004293
Mel Gormand0164ad2015-11-06 16:28:21 -08004294 if (gfpflags_allow_blocking(mask) &&
Byongho Leeee221842015-12-15 01:42:10 +09004295 page->mapping->host->i_size > SZ_16M) {
Yan39b56372008-02-15 10:40:50 -05004296 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05004297 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05004298 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04004299 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05004300 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09004301 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04004302 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004303 break;
4304 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04004305 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4306 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04004307 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004308 free_extent_map(em);
4309 break;
4310 }
4311 if (!test_range_bit(tree, em->start,
4312 extent_map_end(em) - 1,
Chris Mason8b62b722009-09-02 16:53:46 -04004313 EXTENT_LOCKED | EXTENT_WRITEBACK,
Chris Mason9655d292009-09-02 15:22:30 -04004314 0, NULL)) {
Chris Mason70dec802008-01-29 09:59:12 -05004315 remove_extent_mapping(map, em);
4316 /* once for the rb tree */
4317 free_extent_map(em);
4318 }
4319 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04004320 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05004321
4322 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05004323 free_extent_map(em);
4324 }
Chris Masond1310b22008-01-24 16:13:08 -05004325 }
Chris Mason7b13b7b2008-04-18 10:29:50 -04004326 return try_release_extent_state(map, tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05004327}
Chris Masond1310b22008-01-24 16:13:08 -05004328
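/*
 * Illustrative sketch of a ->releasepage hook using
 * try_release_extent_mapping().  The real btrfs callers live in inode.c
 * and disk-io.c; the function name is hypothetical and
 * BTRFS_I(inode)->extent_tree is assumed to be the inode's extent map tree.
 */
#if 0	/* illustrative only, not compiled */
static int example_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *map = &BTRFS_I(inode)->extent_tree;

	/* returns 1 only if nothing in the range is locked or under IO */
	return try_release_extent_mapping(map, tree, page, gfp_flags);
}
#endif
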
Chris Masonec29ed52011-02-23 16:23:20 -05004329/*
4330 * helper function for fiemap, which doesn't want to see any holes.
4331 * This maps until we find something past 'last'
4332 */
4333static struct extent_map *get_extent_skip_holes(struct inode *inode,
4334 u64 offset,
4335 u64 last,
4336 get_extent_t *get_extent)
4337{
4338 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4339 struct extent_map *em;
4340 u64 len;
4341
4342 if (offset >= last)
4343 return NULL;
4344
Dulshani Gunawardhana67871252013-10-31 10:33:04 +05304345 while (1) {
Chris Masonec29ed52011-02-23 16:23:20 -05004346 len = last - offset;
4347 if (len == 0)
4348 break;
Qu Wenruofda28322013-02-26 08:10:22 +00004349 len = ALIGN(len, sectorsize);
Chris Masonec29ed52011-02-23 16:23:20 -05004350 em = get_extent(inode, NULL, 0, offset, len, 0);
David Sterbac7040052011-04-19 18:00:01 +02004351 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05004352 return em;
4353
4354 /* if this isn't a hole return it */
4355 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4356 em->block_start != EXTENT_MAP_HOLE) {
4357 return em;
4358 }
4359
4360 /* this is a hole, advance to the next extent */
4361 offset = extent_map_end(em);
4362 free_extent_map(em);
4363 if (offset >= last)
4364 break;
4365 }
4366 return NULL;
4367}
4368
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004369int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4370 __u64 start, __u64 len, get_extent_t *get_extent)
4371{
Josef Bacik975f84f2010-11-23 19:36:57 +00004372 int ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004373 u64 off = start;
4374 u64 max = start + len;
4375 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00004376 u32 found_type;
4377 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05004378 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004379 u64 disko = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004380 u64 isize = i_size_read(inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00004381 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004382 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00004383 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00004384 struct btrfs_path *path;
Josef Bacikdc046b12014-09-10 16:20:45 -04004385 struct btrfs_root *root = BTRFS_I(inode)->root;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004386 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004387 u64 em_start = 0;
4388 u64 em_len = 0;
4389 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004390
4391 if (len == 0)
4392 return -EINVAL;
4393
Josef Bacik975f84f2010-11-23 19:36:57 +00004394 path = btrfs_alloc_path();
4395 if (!path)
4396 return -ENOMEM;
4397 path->leave_spinning = 1;
4398
Qu Wenruo2c919432014-07-18 09:55:43 +08004399 start = round_down(start, BTRFS_I(inode)->root->sectorsize);
4400 len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
Josef Bacik4d479cf2011-11-17 11:34:31 -05004401
Chris Masonec29ed52011-02-23 16:23:20 -05004402 /*
4403 * lookup the last file extent. We're not using i_size here
4404 * because there might be preallocation past i_size
4405 */
Josef Bacikdc046b12014-09-10 16:20:45 -04004406 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
4407 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00004408 if (ret < 0) {
4409 btrfs_free_path(path);
4410 return ret;
4411 }
4412 WARN_ON(!ret);
4413 path->slots[0]--;
Josef Bacik975f84f2010-11-23 19:36:57 +00004414 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
David Sterba962a2982014-06-04 18:41:45 +02004415 found_type = found_key.type;
Josef Bacik975f84f2010-11-23 19:36:57 +00004416
Chris Masonec29ed52011-02-23 16:23:20 -05004417 /* No extents, but there might be delalloc bits */
Li Zefan33345d012011-04-20 10:31:50 +08004418 if (found_key.objectid != btrfs_ino(inode) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00004419 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05004420 /* have to trust i_size as the end */
4421 last = (u64)-1;
4422 last_for_get_extent = isize;
4423 } else {
4424 /*
4425 * remember the start of the last extent. There are a
4426 * bunch of different factors that go into the length of the
4427 * extent, so it's much less complex to remember where it started
4428 */
4429 last = found_key.offset;
4430 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00004431 }
Liu Bofe09e162013-09-22 12:54:23 +08004432 btrfs_release_path(path);
Josef Bacik975f84f2010-11-23 19:36:57 +00004433
Chris Masonec29ed52011-02-23 16:23:20 -05004434 /*
4435 * we might have some extents allocated but more delalloc past those
4436 * extents. so, we trust isize unless the start of the last extent is
4437 * beyond isize
4438 */
4439 if (last < isize) {
4440 last = (u64)-1;
4441 last_for_get_extent = isize;
4442 }
4443
David Sterbaff13db42015-12-03 14:30:40 +01004444 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01004445 &cached_state);
Chris Masonec29ed52011-02-23 16:23:20 -05004446
Josef Bacik4d479cf2011-11-17 11:34:31 -05004447 em = get_extent_skip_holes(inode, start, last_for_get_extent,
Chris Masonec29ed52011-02-23 16:23:20 -05004448 get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004449 if (!em)
4450 goto out;
4451 if (IS_ERR(em)) {
4452 ret = PTR_ERR(em);
4453 goto out;
4454 }
Josef Bacik975f84f2010-11-23 19:36:57 +00004455
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004456 while (!end) {
Josef Bacikb76bb702013-07-05 13:52:51 -04004457 u64 offset_in_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004458
Chris Masonea8efc72011-03-08 11:54:40 -05004459 /* break if the extent we found is outside the range */
4460 if (em->start >= max || extent_map_end(em) < off)
4461 break;
4462
4463 /*
4464 * get_extent may return an extent that starts before our
4465 * requested range. We have to make sure the ranges
4466 * we return to fiemap always move forward and don't
4467 * overlap, so adjust the offsets here
4468 */
4469 em_start = max(em->start, off);
4470
4471 /*
4472 * record the offset from the start of the extent
Josef Bacikb76bb702013-07-05 13:52:51 -04004473 * for adjusting the disk offset below. Only do this if the
4474 * extent isn't compressed since our in ram offset may be past
4475 * what we have actually allocated on disk.
Chris Masonea8efc72011-03-08 11:54:40 -05004476 */
Josef Bacikb76bb702013-07-05 13:52:51 -04004477 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4478 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05004479 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05004480 em_len = em_end - em_start;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004481 disko = 0;
4482 flags = 0;
4483
Chris Masonea8efc72011-03-08 11:54:40 -05004484 /*
4485 * bump off for our next call to get_extent
4486 */
4487 off = extent_map_end(em);
4488 if (off >= max)
4489 end = 1;
4490
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004491 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004492 end = 1;
4493 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004494 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004495 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4496 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04004497 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004498 flags |= (FIEMAP_EXTENT_DELALLOC |
4499 FIEMAP_EXTENT_UNKNOWN);
Josef Bacikdc046b12014-09-10 16:20:45 -04004500 } else if (fieinfo->fi_extents_max) {
4501 u64 bytenr = em->block_start -
4502 (em->start - em->orig_start);
Liu Bofe09e162013-09-22 12:54:23 +08004503
Chris Masonea8efc72011-03-08 11:54:40 -05004504 disko = em->block_start + offset_in_extent;
Liu Bofe09e162013-09-22 12:54:23 +08004505
4506 /*
4507 * As btrfs supports shared space, this information
4508 * can be exported to userspace tools via
Josef Bacikdc046b12014-09-10 16:20:45 -04004509 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
4510 * then we're just getting a count and we can skip the
4511 * lookup stuff.
Liu Bofe09e162013-09-22 12:54:23 +08004512 */
Josef Bacikdc046b12014-09-10 16:20:45 -04004513 ret = btrfs_check_shared(NULL, root->fs_info,
4514 root->objectid,
4515 btrfs_ino(inode), bytenr);
4516 if (ret < 0)
Liu Bofe09e162013-09-22 12:54:23 +08004517 goto out_free;
Josef Bacikdc046b12014-09-10 16:20:45 -04004518 if (ret)
Liu Bofe09e162013-09-22 12:54:23 +08004519 flags |= FIEMAP_EXTENT_SHARED;
Josef Bacikdc046b12014-09-10 16:20:45 -04004520 ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004521 }
4522 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4523 flags |= FIEMAP_EXTENT_ENCODED;
Josef Bacik0d2b2372015-05-19 10:44:04 -04004524 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4525 flags |= FIEMAP_EXTENT_UNWRITTEN;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004526
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004527 free_extent_map(em);
4528 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05004529 if ((em_start >= last) || em_len == (u64)-1 ||
4530 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004531 flags |= FIEMAP_EXTENT_LAST;
4532 end = 1;
4533 }
4534
Chris Masonec29ed52011-02-23 16:23:20 -05004535 /* now scan forward to see if this is really the last extent. */
4536 em = get_extent_skip_holes(inode, off, last_for_get_extent,
4537 get_extent);
4538 if (IS_ERR(em)) {
4539 ret = PTR_ERR(em);
4540 goto out;
4541 }
4542 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00004543 flags |= FIEMAP_EXTENT_LAST;
4544 end = 1;
4545 }
Chris Masonec29ed52011-02-23 16:23:20 -05004546 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4547 em_len, flags);
Chengyu Song26e726a2015-03-24 18:12:56 -04004548 if (ret) {
4549 if (ret == 1)
4550 ret = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05004551 goto out_free;
Chengyu Song26e726a2015-03-24 18:12:56 -04004552 }
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004553 }
4554out_free:
4555 free_extent_map(em);
4556out:
Liu Bofe09e162013-09-22 12:54:23 +08004557 btrfs_free_path(path);
Liu Boa52f4cd2013-05-01 16:23:41 +00004558 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
Josef Bacik2ac55d42010-02-03 19:33:23 +00004559 &cached_state, GFP_NOFS);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05004560 return ret;
4561}
4562
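/*
 * Illustrative sketch of how the ->fiemap inode operation reaches
 * extent_fiemap().  In btrfs the real implementation is btrfs_fiemap()
 * in inode.c, which also validates the caller's fiemap flags; the
 * get_extent callback named here is an assumption.
 */
#if 0	/* illustrative only, not compiled */
static int example_fiemap(struct inode *inode,
			  struct fiemap_extent_info *fieinfo,
			  u64 start, u64 len)
{
	/* walks extent maps via the supplied get_extent hook */
	return extent_fiemap(inode, fieinfo, start, len,
			     btrfs_get_extent_fiemap);
}
#endif
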
Chris Mason727011e2010-08-06 13:21:20 -04004563static void __free_extent_buffer(struct extent_buffer *eb)
4564{
Eric Sandeen6d49ba12013-04-22 16:12:31 +00004565 btrfs_leak_debug_del(&eb->leak_list);
Chris Mason727011e2010-08-06 13:21:20 -04004566 kmem_cache_free(extent_buffer_cache, eb);
4567}
4568
Josef Bacika26e8c92014-03-28 17:07:27 -04004569int extent_buffer_under_io(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004570{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004571 return (atomic_read(&eb->io_pages) ||
4572 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4573 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Chris Masond1310b22008-01-24 16:13:08 -05004574}
4575
Miao Xie897ca6e92010-10-26 20:57:29 -04004576/*
4577 * Helper for releasing extent buffer page.
4578 */
David Sterbaa50924e2014-07-31 00:51:36 +02004579static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
Miao Xie897ca6e92010-10-26 20:57:29 -04004580{
4581 unsigned long index;
4582 struct page *page;
Jan Schmidt815a51c2012-05-16 17:00:02 +02004583 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
Miao Xie897ca6e92010-10-26 20:57:29 -04004584
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004585 BUG_ON(extent_buffer_under_io(eb));
Miao Xie897ca6e92010-10-26 20:57:29 -04004586
David Sterbaa50924e2014-07-31 00:51:36 +02004587 index = num_extent_pages(eb->start, eb->len);
4588 if (index == 0)
Miao Xie897ca6e92010-10-26 20:57:29 -04004589 return;
4590
4591 do {
4592 index--;
David Sterbafb85fc92014-07-31 01:03:53 +02004593 page = eb->pages[index];
Forrest Liu5d2361d2015-02-09 17:31:45 +08004594 if (!page)
4595 continue;
4596 if (mapped)
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004597 spin_lock(&page->mapping->private_lock);
Forrest Liu5d2361d2015-02-09 17:31:45 +08004598 /*
4599 * We do this since we'll remove the pages after we've
4600 * removed the eb from the radix tree, so we could race
4601 * and have this page now attached to the new eb. So
4602 * only clear page_private if it's still connected to
4603 * this eb.
4604 */
4605 if (PagePrivate(page) &&
4606 page->private == (unsigned long)eb) {
4607 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4608 BUG_ON(PageDirty(page));
4609 BUG_ON(PageWriteback(page));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004610 /*
Forrest Liu5d2361d2015-02-09 17:31:45 +08004611 * We need to make sure we haven't been attached
4612 * to a new eb.
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004613 */
Forrest Liu5d2361d2015-02-09 17:31:45 +08004614 ClearPagePrivate(page);
4615 set_page_private(page, 0);
4616 /* One for the page private */
Miao Xie897ca6e92010-10-26 20:57:29 -04004617 page_cache_release(page);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004618 }
Forrest Liu5d2361d2015-02-09 17:31:45 +08004619
4620 if (mapped)
4621 spin_unlock(&page->mapping->private_lock);
4622
4623 /* One for when we allocated the page */
4624 page_cache_release(page);
David Sterbaa50924e2014-07-31 00:51:36 +02004625 } while (index != 0);
Miao Xie897ca6e92010-10-26 20:57:29 -04004626}
4627
4628/*
4629 * Helper for releasing the extent buffer.
4630 */
4631static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4632{
David Sterbaa50924e2014-07-31 00:51:36 +02004633 btrfs_release_extent_buffer_page(eb);
Miao Xie897ca6e92010-10-26 20:57:29 -04004634 __free_extent_buffer(eb);
4635}
4636
Josef Bacikf28491e2013-12-16 13:24:27 -05004637static struct extent_buffer *
4638__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
David Sterba23d79d82014-06-15 02:55:29 +02004639 unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04004640{
4641 struct extent_buffer *eb = NULL;
4642
Michal Hockod1b5c562015-08-19 14:17:40 +02004643 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004644 eb->start = start;
4645 eb->len = len;
Josef Bacikf28491e2013-12-16 13:24:27 -05004646 eb->fs_info = fs_info;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004647 eb->bflags = 0;
4648 rwlock_init(&eb->lock);
4649 atomic_set(&eb->write_locks, 0);
4650 atomic_set(&eb->read_locks, 0);
4651 atomic_set(&eb->blocking_readers, 0);
4652 atomic_set(&eb->blocking_writers, 0);
4653 atomic_set(&eb->spinning_readers, 0);
4654 atomic_set(&eb->spinning_writers, 0);
4655 eb->lock_nested = 0;
4656 init_waitqueue_head(&eb->write_lock_wq);
4657 init_waitqueue_head(&eb->read_lock_wq);
4658
4659 btrfs_leak_debug_add(&eb->leak_list, &buffers);
4660
4661 spin_lock_init(&eb->refs_lock);
4662 atomic_set(&eb->refs, 1);
4663 atomic_set(&eb->io_pages, 0);
4664
4665 /*
4666 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4667 */
4668 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4669 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4670 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4671
4672 return eb;
4673}
4674
4675struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4676{
4677 unsigned long i;
4678 struct page *p;
4679 struct extent_buffer *new;
4680 unsigned long num_pages = num_extent_pages(src->start, src->len);
4681
David Sterba3f556f72014-06-15 03:20:26 +02004682 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004683 if (new == NULL)
4684 return NULL;
4685
4686 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04004687 p = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004688 if (!p) {
4689 btrfs_release_extent_buffer(new);
4690 return NULL;
4691 }
4692 attach_extent_buffer_page(new, p);
4693 WARN_ON(PageDirty(p));
4694 SetPageUptodate(p);
4695 new->pages[i] = p;
4696 }
4697
4698 copy_extent_buffer(new, src, 0, 0, src->len);
4699 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4700 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4701
4702 return new;
4703}
4704
Omar Sandoval0f331222015-09-29 20:50:31 -07004705struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4706 u64 start, unsigned long len)
Josef Bacikdb7f3432013-08-07 14:54:37 -04004707{
4708 struct extent_buffer *eb;
David Sterba3f556f72014-06-15 03:20:26 +02004709 unsigned long num_pages;
Josef Bacikdb7f3432013-08-07 14:54:37 -04004710 unsigned long i;
4711
Omar Sandoval0f331222015-09-29 20:50:31 -07004712 num_pages = num_extent_pages(start, len);
David Sterba3f556f72014-06-15 03:20:26 +02004713
4714 eb = __alloc_extent_buffer(fs_info, start, len);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004715 if (!eb)
4716 return NULL;
4717
4718 for (i = 0; i < num_pages; i++) {
Josef Bacik9ec72672013-08-07 16:57:23 -04004719 eb->pages[i] = alloc_page(GFP_NOFS);
Josef Bacikdb7f3432013-08-07 14:54:37 -04004720 if (!eb->pages[i])
4721 goto err;
4722 }
4723 set_extent_buffer_uptodate(eb);
4724 btrfs_set_header_nritems(eb, 0);
4725 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4726
4727 return eb;
4728err:
4729 for (; i > 0; i--)
4730 __free_page(eb->pages[i - 1]);
4731 __free_extent_buffer(eb);
4732 return NULL;
4733}
4734
Omar Sandoval0f331222015-09-29 20:50:31 -07004735struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4736 u64 start)
4737{
4738 unsigned long len;
4739
4740 if (!fs_info) {
4741 /*
4742 * Called only from tests that don't always have a fs_info
4743 * available, but we know that nodesize is 4096
4744 */
4745 len = 4096;
4746 } else {
4747 len = fs_info->tree_root->nodesize;
4748 }
4749
4750 return __alloc_dummy_extent_buffer(fs_info, start, len);
4751}
4752
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004753static void check_buffer_tree_ref(struct extent_buffer *eb)
4754{
Chris Mason242e18c2013-01-29 17:49:37 -05004755 int refs;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004756 /* the ref bit is tricky. We have to make sure it is set
4757 * if we have the buffer dirty. Otherwise the
4758 * code to free a buffer can end up dropping a dirty
4759 * page
4760 *
4761 * Once the ref bit is set, it won't go away while the
4762 * buffer is dirty or in writeback, and it also won't
4763 * go away while we have the reference count on the
4764 * eb bumped.
4765 *
4766 * We can't just set the ref bit without bumping the
4767 * ref on the eb because free_extent_buffer might
4768 * see the ref bit and try to clear it. If this happens
4769 * free_extent_buffer might end up dropping our original
4770 * ref by mistake and freeing the page before we are able
4771 * to add one more ref.
4772 *
4773 * So bump the ref count first, then set the bit. If someone
4774 * beat us to it, drop the ref we added.
4775 */
Chris Mason242e18c2013-01-29 17:49:37 -05004776 refs = atomic_read(&eb->refs);
4777 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4778 return;
4779
Josef Bacik594831c2012-07-20 16:11:08 -04004780 spin_lock(&eb->refs_lock);
4781 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004782 atomic_inc(&eb->refs);
Josef Bacik594831c2012-07-20 16:11:08 -04004783 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004784}
4785
Mel Gorman2457aec2014-06-04 16:10:31 -07004786static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4787 struct page *accessed)
Josef Bacik5df42352012-03-15 18:24:42 -04004788{
4789 unsigned long num_pages, i;
4790
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004791 check_buffer_tree_ref(eb);
4792
Josef Bacik5df42352012-03-15 18:24:42 -04004793 num_pages = num_extent_pages(eb->start, eb->len);
4794 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02004795 struct page *p = eb->pages[i];
4796
Mel Gorman2457aec2014-06-04 16:10:31 -07004797 if (p != accessed)
4798 mark_page_accessed(p);
Josef Bacik5df42352012-03-15 18:24:42 -04004799 }
4800}
4801
Josef Bacikf28491e2013-12-16 13:24:27 -05004802struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4803 u64 start)
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004804{
4805 struct extent_buffer *eb;
4806
4807 rcu_read_lock();
Josef Bacikf28491e2013-12-16 13:24:27 -05004808 eb = radix_tree_lookup(&fs_info->buffer_radix,
4809 start >> PAGE_CACHE_SHIFT);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004810 if (eb && atomic_inc_not_zero(&eb->refs)) {
4811 rcu_read_unlock();
Filipe Manana062c19e2015-04-23 11:28:48 +01004812 /*
4813 * Lock our eb's refs_lock to avoid races with
4814 * free_extent_buffer. When we get our eb it might be flagged
4815 * with EXTENT_BUFFER_STALE and another task running
4816 * free_extent_buffer might have seen that flag set,
4817 * eb->refs == 2, that the buffer isn't under IO (dirty and
4818 * writeback flags not set) and it's still in the tree (flag
4819 * EXTENT_BUFFER_TREE_REF set), therefore being in the process
4820 * of decrementing the extent buffer's reference count twice.
4821 * So here we could race and increment the eb's reference count,
4822 * clear its stale flag, mark it as dirty and drop our reference
4823 * before the other task finishes executing free_extent_buffer,
4824 * which would later result in an attempt to free an extent
4825 * buffer that is dirty.
4826 */
4827 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
4828 spin_lock(&eb->refs_lock);
4829 spin_unlock(&eb->refs_lock);
4830 }
Mel Gorman2457aec2014-06-04 16:10:31 -07004831 mark_extent_buffer_accessed(eb, NULL);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004832 return eb;
4833 }
4834 rcu_read_unlock();
4835
4836 return NULL;
4837}
4838
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04004839#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4840struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
David Sterbace3e6982014-06-15 03:00:04 +02004841 u64 start)
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04004842{
4843 struct extent_buffer *eb, *exists = NULL;
4844 int ret;
4845
4846 eb = find_extent_buffer(fs_info, start);
4847 if (eb)
4848 return eb;
David Sterba3f556f72014-06-15 03:20:26 +02004849 eb = alloc_dummy_extent_buffer(fs_info, start);
Josef Bacikfaa2dbf2014-05-07 17:06:09 -04004850 if (!eb)
4851 return NULL;
4852 eb->fs_info = fs_info;
4853again:
4854 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4855 if (ret)
4856 goto free_eb;
4857 spin_lock(&fs_info->buffer_lock);
4858 ret = radix_tree_insert(&fs_info->buffer_radix,
4859 start >> PAGE_CACHE_SHIFT, eb);
4860 spin_unlock(&fs_info->buffer_lock);
4861 radix_tree_preload_end();
4862 if (ret == -EEXIST) {
4863 exists = find_extent_buffer(fs_info, start);
4864 if (exists)
4865 goto free_eb;
4866 else
4867 goto again;
4868 }
4869 check_buffer_tree_ref(eb);
4870 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4871
4872 /*
4873 * We will free dummy extent buffers if they come into
4874 * free_extent_buffer with a ref count of 2, but if we are using this we
4875 * want the buffers to stay in memory until we're done with them, so
4876 * bump the ref count again.
4877 */
4878 atomic_inc(&eb->refs);
4879 return eb;
4880free_eb:
4881 btrfs_release_extent_buffer(eb);
4882 return exists;
4883}
4884#endif
4885
Josef Bacikf28491e2013-12-16 13:24:27 -05004886struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
David Sterbace3e6982014-06-15 03:00:04 +02004887 u64 start)
Chris Masond1310b22008-01-24 16:13:08 -05004888{
David Sterbace3e6982014-06-15 03:00:04 +02004889 unsigned long len = fs_info->tree_root->nodesize;
Chris Masond1310b22008-01-24 16:13:08 -05004890 unsigned long num_pages = num_extent_pages(start, len);
4891 unsigned long i;
4892 unsigned long index = start >> PAGE_CACHE_SHIFT;
4893 struct extent_buffer *eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04004894 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05004895 struct page *p;
Josef Bacikf28491e2013-12-16 13:24:27 -05004896 struct address_space *mapping = fs_info->btree_inode->i_mapping;
Chris Masond1310b22008-01-24 16:13:08 -05004897 int uptodate = 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04004898 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05004899
Josef Bacikf28491e2013-12-16 13:24:27 -05004900 eb = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004901 if (eb)
Chris Mason6af118ce2008-07-22 11:18:07 -04004902 return eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04004903
David Sterba23d79d82014-06-15 02:55:29 +02004904 eb = __alloc_extent_buffer(fs_info, start, len);
Peter2b114d12008-04-01 11:21:40 -04004905 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05004906 return NULL;
4907
Chris Mason727011e2010-08-06 13:21:20 -04004908 for (i = 0; i < num_pages; i++, index++) {
Michal Hockod1b5c562015-08-19 14:17:40 +02004909 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
Josef Bacik4804b382012-10-05 16:43:45 -04004910 if (!p)
Chris Mason6af118ce2008-07-22 11:18:07 -04004911 goto free_eb;
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004912
4913 spin_lock(&mapping->private_lock);
4914 if (PagePrivate(p)) {
4915 /*
4916 * We could have already allocated an eb for this page
4917 * and attached one so let's see if we can get a ref on
4918 * the existing eb, and if we can we know it's good and
4919 * we can just return that one, else we know we can just
4920 * overwrite page->private.
4921 */
4922 exists = (struct extent_buffer *)p->private;
4923 if (atomic_inc_not_zero(&exists->refs)) {
4924 spin_unlock(&mapping->private_lock);
4925 unlock_page(p);
Josef Bacik17de39a2012-05-04 15:16:06 -04004926 page_cache_release(p);
Mel Gorman2457aec2014-06-04 16:10:31 -07004927 mark_extent_buffer_accessed(exists, p);
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004928 goto free_eb;
4929 }
Omar Sandoval5ca64f42015-02-24 02:47:05 -08004930 exists = NULL;
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004931
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004932 /*
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004933 * Do this so attach doesn't complain and we need to
4934 * drop the ref the old guy had.
4935 */
4936 ClearPagePrivate(p);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004937 WARN_ON(PageDirty(p));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004938 page_cache_release(p);
Chris Masond1310b22008-01-24 16:13:08 -05004939 }
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004940 attach_extent_buffer_page(eb, p);
4941 spin_unlock(&mapping->private_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004942 WARN_ON(PageDirty(p));
Chris Mason727011e2010-08-06 13:21:20 -04004943 eb->pages[i] = p;
Chris Masond1310b22008-01-24 16:13:08 -05004944 if (!PageUptodate(p))
4945 uptodate = 0;
Chris Masoneb14ab82011-02-10 12:35:00 -05004946
4947 /*
4948 * see below about how we avoid a nasty race with release page
4949 * and why we unlock later
4950 */
Chris Masond1310b22008-01-24 16:13:08 -05004951 }
4952 if (uptodate)
Chris Masonb4ce94d2009-02-04 09:25:08 -05004953 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik115391d2012-03-09 09:51:43 -05004954again:
Miao Xie19fe0a82010-10-26 20:57:29 -04004955 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4956 if (ret)
4957 goto free_eb;
4958
Josef Bacikf28491e2013-12-16 13:24:27 -05004959 spin_lock(&fs_info->buffer_lock);
4960 ret = radix_tree_insert(&fs_info->buffer_radix,
4961 start >> PAGE_CACHE_SHIFT, eb);
4962 spin_unlock(&fs_info->buffer_lock);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004963 radix_tree_preload_end();
Miao Xie19fe0a82010-10-26 20:57:29 -04004964 if (ret == -EEXIST) {
Josef Bacikf28491e2013-12-16 13:24:27 -05004965 exists = find_extent_buffer(fs_info, start);
Chandra Seetharaman452c75c2013-10-07 10:45:25 -05004966 if (exists)
4967 goto free_eb;
4968 else
Josef Bacik115391d2012-03-09 09:51:43 -05004969 goto again;
Chris Mason6af118ce2008-07-22 11:18:07 -04004970 }
Chris Mason6af118ce2008-07-22 11:18:07 -04004971 /* add one reference for the tree */
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004972 check_buffer_tree_ref(eb);
Josef Bacik34b41ac2013-12-13 10:41:51 -05004973 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
Chris Masoneb14ab82011-02-10 12:35:00 -05004974
4975 /*
4976 * there is a race where release page may have
4977 * tried to find this extent buffer in the radix
4978 * but failed. It will tell the VM it is safe to
4979 * reclaim the page, and it will clear the page private bit.
4980 * We must make sure to set the page private bit properly
4981 * after the extent buffer is in the radix tree so
4982 * it doesn't get lost
4983 */
Chris Mason727011e2010-08-06 13:21:20 -04004984 SetPageChecked(eb->pages[0]);
4985 for (i = 1; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02004986 p = eb->pages[i];
Chris Mason727011e2010-08-06 13:21:20 -04004987 ClearPageChecked(p);
4988 unlock_page(p);
4989 }
4990 unlock_page(eb->pages[0]);
Chris Masond1310b22008-01-24 16:13:08 -05004991 return eb;
4992
Chris Mason6af118ce2008-07-22 11:18:07 -04004993free_eb:
Omar Sandoval5ca64f42015-02-24 02:47:05 -08004994 WARN_ON(!atomic_dec_and_test(&eb->refs));
Chris Mason727011e2010-08-06 13:21:20 -04004995 for (i = 0; i < num_pages; i++) {
4996 if (eb->pages[i])
4997 unlock_page(eb->pages[i]);
4998 }
Chris Masoneb14ab82011-02-10 12:35:00 -05004999
Miao Xie897ca6e92010-10-26 20:57:29 -04005000 btrfs_release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04005001 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05005002}
Chris Masond1310b22008-01-24 16:13:08 -05005003
Josef Bacik3083ee22012-03-09 16:01:49 -05005004static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5005{
5006 struct extent_buffer *eb =
5007 container_of(head, struct extent_buffer, rcu_head);
5008
5009 __free_extent_buffer(eb);
5010}
5011
Josef Bacik3083ee22012-03-09 16:01:49 -05005012/* Expects to have eb->refs_lock already held */
David Sterbaf7a52a42013-04-26 14:56:29 +00005013static int release_extent_buffer(struct extent_buffer *eb)
Josef Bacik3083ee22012-03-09 16:01:49 -05005014{
5015 WARN_ON(atomic_read(&eb->refs) == 0);
5016 if (atomic_dec_and_test(&eb->refs)) {
Josef Bacik34b41ac2013-12-13 10:41:51 -05005017 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
Josef Bacikf28491e2013-12-16 13:24:27 -05005018 struct btrfs_fs_info *fs_info = eb->fs_info;
Josef Bacik3083ee22012-03-09 16:01:49 -05005019
Jan Schmidt815a51c2012-05-16 17:00:02 +02005020 spin_unlock(&eb->refs_lock);
Josef Bacik3083ee22012-03-09 16:01:49 -05005021
Josef Bacikf28491e2013-12-16 13:24:27 -05005022 spin_lock(&fs_info->buffer_lock);
5023 radix_tree_delete(&fs_info->buffer_radix,
Jan Schmidt815a51c2012-05-16 17:00:02 +02005024 eb->start >> PAGE_CACHE_SHIFT);
Josef Bacikf28491e2013-12-16 13:24:27 -05005025 spin_unlock(&fs_info->buffer_lock);
Josef Bacik34b41ac2013-12-13 10:41:51 -05005026 } else {
5027 spin_unlock(&eb->refs_lock);
Jan Schmidt815a51c2012-05-16 17:00:02 +02005028 }
Josef Bacik3083ee22012-03-09 16:01:49 -05005029
5030 /* Should be safe to release our pages at this point */
David Sterbaa50924e2014-07-31 00:51:36 +02005031 btrfs_release_extent_buffer_page(eb);
Josef Bacikbcb7e442015-03-16 17:38:02 -04005032#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5033 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
5034 __free_extent_buffer(eb);
5035 return 1;
5036 }
5037#endif
Josef Bacik3083ee22012-03-09 16:01:49 -05005038 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
Josef Bacike64860a2012-07-20 16:05:36 -04005039 return 1;
Josef Bacik3083ee22012-03-09 16:01:49 -05005040 }
5041 spin_unlock(&eb->refs_lock);
Josef Bacike64860a2012-07-20 16:05:36 -04005042
5043 return 0;
Josef Bacik3083ee22012-03-09 16:01:49 -05005044}
5045
Chris Masond1310b22008-01-24 16:13:08 -05005046void free_extent_buffer(struct extent_buffer *eb)
5047{
Chris Mason242e18c2013-01-29 17:49:37 -05005048 int refs;
5049 int old;
Chris Masond1310b22008-01-24 16:13:08 -05005050 if (!eb)
5051 return;
5052
Chris Mason242e18c2013-01-29 17:49:37 -05005053 while (1) {
5054 refs = atomic_read(&eb->refs);
5055 if (refs <= 3)
5056 break;
5057 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5058 if (old == refs)
5059 return;
5060 }
5061
Josef Bacik3083ee22012-03-09 16:01:49 -05005062 spin_lock(&eb->refs_lock);
5063 if (atomic_read(&eb->refs) == 2 &&
Jan Schmidt815a51c2012-05-16 17:00:02 +02005064 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
5065 atomic_dec(&eb->refs);
5066
5067 if (atomic_read(&eb->refs) == 2 &&
Josef Bacik3083ee22012-03-09 16:01:49 -05005068 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005069 !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05005070 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5071 atomic_dec(&eb->refs);
Chris Masond1310b22008-01-24 16:13:08 -05005072
Josef Bacik3083ee22012-03-09 16:01:49 -05005073 /*
5074 * I know this is terrible, but it's temporary until we stop tracking
5075 * the uptodate bits and such for the extent buffers.
5076 */
David Sterbaf7a52a42013-04-26 14:56:29 +00005077 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005078}
Chris Masond1310b22008-01-24 16:13:08 -05005079
Josef Bacik3083ee22012-03-09 16:01:49 -05005080void free_extent_buffer_stale(struct extent_buffer *eb)
5081{
5082 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05005083 return;
5084
Josef Bacik3083ee22012-03-09 16:01:49 -05005085 spin_lock(&eb->refs_lock);
5086 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5087
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005088 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
Josef Bacik3083ee22012-03-09 16:01:49 -05005089 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5090 atomic_dec(&eb->refs);
David Sterbaf7a52a42013-04-26 14:56:29 +00005091 release_extent_buffer(eb);
Chris Masond1310b22008-01-24 16:13:08 -05005092}
5093
Chris Mason1d4284b2012-03-28 20:31:37 -04005094void clear_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005095{
Chris Masond1310b22008-01-24 16:13:08 -05005096 unsigned long i;
5097 unsigned long num_pages;
5098 struct page *page;
5099
Chris Masond1310b22008-01-24 16:13:08 -05005100 num_pages = num_extent_pages(eb->start, eb->len);
5101
5102 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005103 page = eb->pages[i];
Chris Masonb9473432009-03-13 11:00:37 -04005104 if (!PageDirty(page))
Chris Masond2c3f4f2008-11-19 12:44:22 -05005105 continue;
5106
Chris Masona61e6f22008-07-22 11:18:08 -04005107 lock_page(page);
Chris Masoneb14ab82011-02-10 12:35:00 -05005108 WARN_ON(!PagePrivate(page));
5109
Chris Masond1310b22008-01-24 16:13:08 -05005110 clear_page_dirty_for_io(page);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04005111 spin_lock_irq(&page->mapping->tree_lock);
Chris Masond1310b22008-01-24 16:13:08 -05005112 if (!PageDirty(page)) {
5113 radix_tree_tag_clear(&page->mapping->page_tree,
5114 page_index(page),
5115 PAGECACHE_TAG_DIRTY);
5116 }
Sven Wegener0ee0fda2008-07-30 16:54:26 -04005117 spin_unlock_irq(&page->mapping->tree_lock);
Chris Masonbf0da8c2011-11-04 12:29:37 -04005118 ClearPageError(page);
Chris Masona61e6f22008-07-22 11:18:08 -04005119 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05005120 }
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005121 WARN_ON(atomic_read(&eb->refs) == 0);
Chris Masond1310b22008-01-24 16:13:08 -05005122}
Chris Masond1310b22008-01-24 16:13:08 -05005123
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005124int set_extent_buffer_dirty(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005125{
5126 unsigned long i;
5127 unsigned long num_pages;
Chris Masonb9473432009-03-13 11:00:37 -04005128 int was_dirty = 0;
Chris Masond1310b22008-01-24 16:13:08 -05005129
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005130 check_buffer_tree_ref(eb);
5131
Chris Masonb9473432009-03-13 11:00:37 -04005132 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005133
Chris Masond1310b22008-01-24 16:13:08 -05005134 num_pages = num_extent_pages(eb->start, eb->len);
Josef Bacik3083ee22012-03-09 16:01:49 -05005135 WARN_ON(atomic_read(&eb->refs) == 0);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005136 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5137
Chris Masonb9473432009-03-13 11:00:37 -04005138 for (i = 0; i < num_pages; i++)
David Sterbafb85fc92014-07-31 01:03:53 +02005139 set_page_dirty(eb->pages[i]);
Chris Masonb9473432009-03-13 11:00:37 -04005140 return was_dirty;
Chris Masond1310b22008-01-24 16:13:08 -05005141}
Chris Masond1310b22008-01-24 16:13:08 -05005142
David Sterba69ba3922015-12-03 13:08:59 +01005143void clear_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Mason1259ab72008-05-12 13:39:03 -04005144{
5145 unsigned long i;
5146 struct page *page;
5147 unsigned long num_pages;
5148
Chris Masonb4ce94d2009-02-04 09:25:08 -05005149 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005150 num_pages = num_extent_pages(eb->start, eb->len);
Chris Mason1259ab72008-05-12 13:39:03 -04005151 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005152 page = eb->pages[i];
Chris Mason33958dc2008-07-30 10:29:12 -04005153 if (page)
5154 ClearPageUptodate(page);
Chris Mason1259ab72008-05-12 13:39:03 -04005155 }
Chris Mason1259ab72008-05-12 13:39:03 -04005156}
5157
David Sterba09c25a82015-12-03 13:08:59 +01005158void set_extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005159{
5160 unsigned long i;
5161 struct page *page;
5162 unsigned long num_pages;
5163
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005164 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05005165 num_pages = num_extent_pages(eb->start, eb->len);
Chris Masond1310b22008-01-24 16:13:08 -05005166 for (i = 0; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005167 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005168 SetPageUptodate(page);
5169 }
Chris Masond1310b22008-01-24 16:13:08 -05005170}
Chris Masond1310b22008-01-24 16:13:08 -05005171
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005172int extent_buffer_uptodate(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05005173{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005174 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masond1310b22008-01-24 16:13:08 -05005175}
Chris Masond1310b22008-01-24 16:13:08 -05005176
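/*
 * Read the pages backing an extent buffer from disk.  A nonzero @start
 * skips the pages before that offset.  With WAIT_NONE the pages are only
 * trylocked and contention aborts the read; with WAIT_COMPLETE the
 * function blocks until the reads finish and returns -EIO if any page is
 * still not uptodate.  @mirror_num is forwarded to the low-level read
 * path.
 */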
5177int read_extent_buffer_pages(struct extent_io_tree *tree,
Arne Jansenbb82ab82011-06-10 14:06:53 +02005178 struct extent_buffer *eb, u64 start, int wait,
Chris Masonf1885912008-04-09 16:28:12 -04005179 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05005180{
5181 unsigned long i;
5182 unsigned long start_i;
5183 struct page *page;
5184 int err;
5185 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04005186 int locked_pages = 0;
5187 int all_uptodate = 1;
Chris Masond1310b22008-01-24 16:13:08 -05005188 unsigned long num_pages;
Chris Mason727011e2010-08-06 13:21:20 -04005189 unsigned long num_reads = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05005190 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04005191 unsigned long bio_flags = 0;
Chris Masona86c12c2008-02-07 10:50:54 -05005192
Chris Masonb4ce94d2009-02-04 09:25:08 -05005193 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
Chris Masond1310b22008-01-24 16:13:08 -05005194 return 0;
5195
Chris Masond1310b22008-01-24 16:13:08 -05005196 if (start) {
5197 WARN_ON(start < eb->start);
5198 start_i = (start >> PAGE_CACHE_SHIFT) -
5199 (eb->start >> PAGE_CACHE_SHIFT);
5200 } else {
5201 start_i = 0;
5202 }
5203
5204 num_pages = num_extent_pages(eb->start, eb->len);
5205 for (i = start_i; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005206 page = eb->pages[i];
Arne Jansenbb82ab82011-06-10 14:06:53 +02005207 if (wait == WAIT_NONE) {
David Woodhouse2db04962008-08-07 11:19:43 -04005208 if (!trylock_page(page))
Chris Masonce9adaa2008-04-09 16:28:12 -04005209 goto unlock_exit;
Chris Masond1310b22008-01-24 16:13:08 -05005210 } else {
5211 lock_page(page);
5212 }
Chris Masonce9adaa2008-04-09 16:28:12 -04005213 locked_pages++;
Chris Mason727011e2010-08-06 13:21:20 -04005214 if (!PageUptodate(page)) {
5215 num_reads++;
Chris Masonce9adaa2008-04-09 16:28:12 -04005216 all_uptodate = 0;
Chris Mason727011e2010-08-06 13:21:20 -04005217 }
Chris Masonce9adaa2008-04-09 16:28:12 -04005218 }
5219 if (all_uptodate) {
5220 if (start_i == 0)
Chris Masonb4ce94d2009-02-04 09:25:08 -05005221 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
Chris Masonce9adaa2008-04-09 16:28:12 -04005222 goto unlock_exit;
5223 }
5224
Filipe Manana656f30d2014-09-26 12:25:56 +01005225 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
Josef Bacik5cf1ab52012-04-16 09:42:26 -04005226 eb->read_mirror = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005227 atomic_set(&eb->io_pages, num_reads);
Chris Masonce9adaa2008-04-09 16:28:12 -04005228 for (i = start_i; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005229 page = eb->pages[i];
Chris Masonce9adaa2008-04-09 16:28:12 -04005230 if (!PageUptodate(page)) {
Chris Masonf1885912008-04-09 16:28:12 -04005231 ClearPageError(page);
Chris Masona86c12c2008-02-07 10:50:54 -05005232 err = __extent_read_full_page(tree, page,
Chris Masonf1885912008-04-09 16:28:12 -04005233 get_extent, &bio,
Josef Bacikd4c7ca82013-04-19 19:49:09 -04005234 mirror_num, &bio_flags,
5235 READ | REQ_META);
Chris Masond3977122009-01-05 21:25:51 -05005236 if (err)
Chris Masond1310b22008-01-24 16:13:08 -05005237 ret = err;
Chris Masond1310b22008-01-24 16:13:08 -05005238 } else {
5239 unlock_page(page);
5240 }
5241 }
5242
Jeff Mahoney355808c2011-10-03 23:23:14 -04005243 if (bio) {
Josef Bacikd4c7ca82013-04-19 19:49:09 -04005244 err = submit_one_bio(READ | REQ_META, bio, mirror_num,
5245 bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01005246 if (err)
5247 return err;
Jeff Mahoney355808c2011-10-03 23:23:14 -04005248 }
Chris Masona86c12c2008-02-07 10:50:54 -05005249
Arne Jansenbb82ab82011-06-10 14:06:53 +02005250 if (ret || wait != WAIT_COMPLETE)
Chris Masond1310b22008-01-24 16:13:08 -05005251 return ret;
Chris Masond3977122009-01-05 21:25:51 -05005252
Chris Masond1310b22008-01-24 16:13:08 -05005253 for (i = start_i; i < num_pages; i++) {
David Sterbafb85fc92014-07-31 01:03:53 +02005254 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005255 wait_on_page_locked(page);
Chris Masond3977122009-01-05 21:25:51 -05005256 if (!PageUptodate(page))
Chris Masond1310b22008-01-24 16:13:08 -05005257 ret = -EIO;
Chris Masond1310b22008-01-24 16:13:08 -05005258 }
Chris Masond3977122009-01-05 21:25:51 -05005259
Chris Masond1310b22008-01-24 16:13:08 -05005260 return ret;
Chris Masonce9adaa2008-04-09 16:28:12 -04005261
5262unlock_exit:
5263 i = start_i;
Chris Masond3977122009-01-05 21:25:51 -05005264 while (locked_pages > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005265 page = eb->pages[i];
Chris Masonce9adaa2008-04-09 16:28:12 -04005266 i++;
5267 unlock_page(page);
5268 locked_pages--;
5269 }
5270 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05005271}
Chris Masond1310b22008-01-24 16:13:08 -05005272
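/*
 * Copy @len bytes starting at offset @start within the extent buffer into
 * the kernel buffer @dstv, one page at a time since the range may span
 * several pages.
 */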
5273void read_extent_buffer(struct extent_buffer *eb, void *dstv,
5274 unsigned long start,
5275 unsigned long len)
5276{
5277 size_t cur;
5278 size_t offset;
5279 struct page *page;
5280 char *kaddr;
5281 char *dst = (char *)dstv;
5282 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5283 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05005284
5285 WARN_ON(start > eb->len);
5286 WARN_ON(start + len > eb->start + eb->len);
5287
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005288 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005289
Chris Masond3977122009-01-05 21:25:51 -05005290 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005291 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005292
5293 cur = min(len, (PAGE_CACHE_SIZE - offset));
Chris Masona6591712011-07-19 12:04:14 -04005294 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005295 memcpy(dst, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005296
5297 dst += cur;
5298 len -= cur;
5299 offset = 0;
5300 i++;
5301 }
5302}
Chris Masond1310b22008-01-24 16:13:08 -05005303
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005304int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
5305 unsigned long start,
5306 unsigned long len)
5307{
5308 size_t cur;
5309 size_t offset;
5310 struct page *page;
5311 char *kaddr;
5312 char __user *dst = (char __user *)dstv;
5313 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5314 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5315 int ret = 0;
5316
5317 WARN_ON(start > eb->len);
5318 WARN_ON(start + len > eb->start + eb->len);
5319
5320 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5321
5322 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005323 page = eb->pages[i];
Gerhard Heift550ac1d2014-01-30 16:24:01 +01005324
5325 cur = min(len, (PAGE_CACHE_SIZE - offset));
5326 kaddr = page_address(page);
5327 if (copy_to_user(dst, kaddr + offset, cur)) {
5328 ret = -EFAULT;
5329 break;
5330 }
5331
5332 dst += cur;
5333 len -= cur;
5334 offset = 0;
5335 i++;
5336 }
5337
5338 return ret;
5339}
5340
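/*
 * Map a range of the extent buffer directly: on success *map points into
 * a single page and *map_len says how many bytes are valid there.  A range
 * that would cross a page boundary cannot be mapped and returns -EINVAL,
 * so callers need a fallback to the copying helpers.
 */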
Chris Masond1310b22008-01-24 16:13:08 -05005341int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
Chris Masona6591712011-07-19 12:04:14 -04005342 unsigned long min_len, char **map,
Chris Masond1310b22008-01-24 16:13:08 -05005343 unsigned long *map_start,
Chris Masona6591712011-07-19 12:04:14 -04005344 unsigned long *map_len)
Chris Masond1310b22008-01-24 16:13:08 -05005345{
5346 size_t offset = start & (PAGE_CACHE_SIZE - 1);
5347 char *kaddr;
5348 struct page *p;
5349 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5350 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5351 unsigned long end_i = (start_offset + start + min_len - 1) >>
5352 PAGE_CACHE_SHIFT;
5353
5354 if (i != end_i)
5355 return -EINVAL;
5356
5357 if (i == 0) {
5358 offset = start_offset;
5359 *map_start = 0;
5360 } else {
5361 offset = 0;
5362 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
5363 }
Chris Masond3977122009-01-05 21:25:51 -05005364
Chris Masond1310b22008-01-24 16:13:08 -05005365 if (start + min_len > eb->len) {
Julia Lawall31b1a2b2012-11-03 10:58:34 +00005366 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02005367 "wanted %lu %lu\n",
5368 eb->start, eb->len, start, min_len);
Josef Bacik850265332011-03-15 14:52:12 -04005369 return -EINVAL;
Chris Masond1310b22008-01-24 16:13:08 -05005370 }
5371
David Sterbafb85fc92014-07-31 01:03:53 +02005372 p = eb->pages[i];
Chris Masona6591712011-07-19 12:04:14 -04005373 kaddr = page_address(p);
Chris Masond1310b22008-01-24 16:13:08 -05005374 *map = kaddr + offset;
5375 *map_len = PAGE_CACHE_SIZE - offset;
5376 return 0;
5377}
Chris Masond1310b22008-01-24 16:13:08 -05005378
Chris Masond1310b22008-01-24 16:13:08 -05005379int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
5380 unsigned long start,
5381 unsigned long len)
5382{
5383 size_t cur;
5384 size_t offset;
5385 struct page *page;
5386 char *kaddr;
5387 char *ptr = (char *)ptrv;
5388 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5389 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5390 int ret = 0;
5391
5392 WARN_ON(start > eb->len);
5393 WARN_ON(start + len > eb->start + eb->len);
5394
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005395 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005396
Chris Masond3977122009-01-05 21:25:51 -05005397 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005398 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005399
5400 cur = min(len, (PAGE_CACHE_SIZE - offset));
5401
Chris Masona6591712011-07-19 12:04:14 -04005402 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005403 ret = memcmp(ptr, kaddr + offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005404 if (ret)
5405 break;
5406
5407 ptr += cur;
5408 len -= cur;
5409 offset = 0;
5410 i++;
5411 }
5412 return ret;
5413}
Chris Masond1310b22008-01-24 16:13:08 -05005414
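/*
 * Copy @len bytes from the kernel buffer @srcv into the extent buffer at
 * offset @start.  The destination pages are expected to be uptodate; the
 * WARN_ON below fires if they are not.
 */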
5415void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5416 unsigned long start, unsigned long len)
5417{
5418 size_t cur;
5419 size_t offset;
5420 struct page *page;
5421 char *kaddr;
5422 char *src = (char *)srcv;
5423 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5424 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5425
5426 WARN_ON(start > eb->len);
5427 WARN_ON(start + len > eb->start + eb->len);
5428
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005429 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005430
Chris Masond3977122009-01-05 21:25:51 -05005431 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005432 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005433 WARN_ON(!PageUptodate(page));
5434
5435 cur = min(len, PAGE_CACHE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04005436 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005437 memcpy(kaddr + offset, src, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005438
5439 src += cur;
5440 len -= cur;
5441 offset = 0;
5442 i++;
5443 }
5444}
Chris Masond1310b22008-01-24 16:13:08 -05005445
5446void memset_extent_buffer(struct extent_buffer *eb, char c,
5447 unsigned long start, unsigned long len)
5448{
5449 size_t cur;
5450 size_t offset;
5451 struct page *page;
5452 char *kaddr;
5453 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5454 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5455
5456 WARN_ON(start > eb->len);
5457 WARN_ON(start + len > eb->start + eb->len);
5458
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005459 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005460
Chris Masond3977122009-01-05 21:25:51 -05005461 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005462 page = eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005463 WARN_ON(!PageUptodate(page));
5464
5465 cur = min(len, PAGE_CACHE_SIZE - offset);
Chris Masona6591712011-07-19 12:04:14 -04005466 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005467 memset(kaddr + offset, c, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005468
5469 len -= cur;
5470 offset = 0;
5471 i++;
5472 }
5473}
Chris Masond1310b22008-01-24 16:13:08 -05005474
5475void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5476 unsigned long dst_offset, unsigned long src_offset,
5477 unsigned long len)
5478{
5479 u64 dst_len = dst->len;
5480 size_t cur;
5481 size_t offset;
5482 struct page *page;
5483 char *kaddr;
5484 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5485 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5486
5487 WARN_ON(src->len != dst_len);
5488
5489 offset = (start_offset + dst_offset) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005490 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005491
Chris Masond3977122009-01-05 21:25:51 -05005492 while (len > 0) {
David Sterbafb85fc92014-07-31 01:03:53 +02005493 page = dst->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05005494 WARN_ON(!PageUptodate(page));
5495
5496 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
5497
Chris Masona6591712011-07-19 12:04:14 -04005498 kaddr = page_address(page);
Chris Masond1310b22008-01-24 16:13:08 -05005499 read_extent_buffer(src, kaddr + offset, src_offset, cur);
Chris Masond1310b22008-01-24 16:13:08 -05005500
5501 src_offset += cur;
5502 len -= cur;
5503 offset = 0;
5504 i++;
5505 }
5506}
Chris Masond1310b22008-01-24 16:13:08 -05005507
Omar Sandoval3e1e8bb2015-09-29 20:50:30 -07005508/*
5509 * The extent buffer bitmap operations are done with byte granularity because
5510 * bitmap items are not guaranteed to be aligned to a word and therefore a
5511 * single word in a bitmap may straddle two pages in the extent buffer.
5512 */
5513#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
5514#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
5515#define BITMAP_FIRST_BYTE_MASK(start) \
5516 ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
5517#define BITMAP_LAST_BYTE_MASK(nbits) \
5518 (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
5519
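/*
 * For illustration, with BITS_PER_BYTE == 8 the masks above work out to
 *
 *	BITMAP_FIRST_BYTE_MASK(5) == 0xe0	(bits 5..7 of the first byte)
 *	BITMAP_LAST_BYTE_MASK(5)  == 0x1f	(bits 0..4 of the last byte)
 *	BITMAP_LAST_BYTE_MASK(8)  == 0xff	(a whole final byte)
 *
 * which is how the set/clear helpers below handle the partial bytes at
 * either end of a range.
 */
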
5520/*
5521 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5522 * given bit number
5523 * @eb: the extent buffer
5524 * @start: offset of the bitmap item in the extent buffer
5525 * @nr: bit number
5526 * @page_index: return index of the page in the extent buffer that contains the
5527 * given bit number
5528 * @page_offset: return offset into the page given by page_index
5529 *
5530 * This helper hides the ugliness of finding the byte in an extent buffer which
5531 * contains a given bit.
5532 */
5533static inline void eb_bitmap_offset(struct extent_buffer *eb,
5534 unsigned long start, unsigned long nr,
5535 unsigned long *page_index,
5536 size_t *page_offset)
5537{
5538 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5539 size_t byte_offset = BIT_BYTE(nr);
5540 size_t offset;
5541
5542 /*
5543 * The byte we want is the offset of the extent buffer + the offset of
5544 * the bitmap item in the extent buffer + the offset of the byte in the
5545 * bitmap item.
5546 */
5547 offset = start_offset + start + byte_offset;
5548
5549 *page_index = offset >> PAGE_CACHE_SHIFT;
5550 *page_offset = offset & (PAGE_CACHE_SIZE - 1);
5551}
5552
5553/**
5554 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5555 * @eb: the extent buffer
5556 * @start: offset of the bitmap item in the extent buffer
5557 * @nr: bit number to test
5558 */
5559int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
5560 unsigned long nr)
5561{
5562 char *kaddr;
5563 struct page *page;
5564 unsigned long i;
5565 size_t offset;
5566
5567 eb_bitmap_offset(eb, start, nr, &i, &offset);
5568 page = eb->pages[i];
5569 WARN_ON(!PageUptodate(page));
5570 kaddr = page_address(page);
5571 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5572}
5573
5574/**
5575 * extent_buffer_bitmap_set - set an area of a bitmap
5576 * @eb: the extent buffer
5577 * @start: offset of the bitmap item in the extent buffer
5578 * @pos: bit number of the first bit
5579 * @len: number of bits to set
5580 */
5581void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5582 unsigned long pos, unsigned long len)
5583{
5584 char *kaddr;
5585 struct page *page;
5586 unsigned long i;
5587 size_t offset;
5588 const unsigned int size = pos + len;
5589 int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5590 unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5591
5592 eb_bitmap_offset(eb, start, pos, &i, &offset);
5593 page = eb->pages[i];
5594 WARN_ON(!PageUptodate(page));
5595 kaddr = page_address(page);
5596
5597 while (len >= bits_to_set) {
5598 kaddr[offset] |= mask_to_set;
5599 len -= bits_to_set;
5600 bits_to_set = BITS_PER_BYTE;
5601 mask_to_set = ~0U;
5602 if (++offset >= PAGE_CACHE_SIZE && len > 0) {
5603 offset = 0;
5604 page = eb->pages[++i];
5605 WARN_ON(!PageUptodate(page));
5606 kaddr = page_address(page);
5607 }
5608 }
5609 if (len) {
5610 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5611 kaddr[offset] |= mask_to_set;
5612 }
5613}
5614
5615
5616/**
5617 * extent_buffer_bitmap_clear - clear an area of a bitmap
5618 * @eb: the extent buffer
5619 * @start: offset of the bitmap item in the extent buffer
5620 * @pos: bit number of the first bit
5621 * @len: number of bits to clear
5622 */
5623void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5624 unsigned long pos, unsigned long len)
5625{
5626 char *kaddr;
5627 struct page *page;
5628 unsigned long i;
5629 size_t offset;
5630 const unsigned int size = pos + len;
5631 int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5632 unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5633
5634 eb_bitmap_offset(eb, start, pos, &i, &offset);
5635 page = eb->pages[i];
5636 WARN_ON(!PageUptodate(page));
5637 kaddr = page_address(page);
5638
5639 while (len >= bits_to_clear) {
5640 kaddr[offset] &= ~mask_to_clear;
5641 len -= bits_to_clear;
5642 bits_to_clear = BITS_PER_BYTE;
5643 mask_to_clear = ~0U;
5644 if (++offset >= PAGE_CACHE_SIZE && len > 0) {
5645 offset = 0;
5646 page = eb->pages[++i];
5647 WARN_ON(!PageUptodate(page));
5648 kaddr = page_address(page);
5649 }
5650 }
5651 if (len) {
5652 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5653 kaddr[offset] &= ~mask_to_clear;
5654 }
5655}
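
/*
 * Usage sketch, with hypothetical values for illustration only: to flip
 * bits 3..10 of a bitmap item that starts at byte offset @start in the
 * buffer, a caller would do
 *
 *	extent_buffer_bitmap_set(eb, start, 3, 8);
 *	...
 *	extent_buffer_bitmap_clear(eb, start, 3, 8);
 *
 * i.e. @pos is the first bit and @len the number of bits, independent of
 * which page of the buffer the underlying bytes land in.
 */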
5656
Sergei Trofimovich33872062011-04-11 21:52:52 +00005657static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5658{
5659 unsigned long distance = (src > dst) ? src - dst : dst - src;
5660 return distance < len;
5661}
5662
Chris Masond1310b22008-01-24 16:13:08 -05005663static void copy_pages(struct page *dst_page, struct page *src_page,
5664 unsigned long dst_off, unsigned long src_off,
5665 unsigned long len)
5666{
Chris Masona6591712011-07-19 12:04:14 -04005667 char *dst_kaddr = page_address(dst_page);
Chris Masond1310b22008-01-24 16:13:08 -05005668 char *src_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04005669 int must_memmove = 0;
Chris Masond1310b22008-01-24 16:13:08 -05005670
Sergei Trofimovich33872062011-04-11 21:52:52 +00005671 if (dst_page != src_page) {
Chris Masona6591712011-07-19 12:04:14 -04005672 src_kaddr = page_address(src_page);
Sergei Trofimovich33872062011-04-11 21:52:52 +00005673 } else {
Chris Masond1310b22008-01-24 16:13:08 -05005674 src_kaddr = dst_kaddr;
Chris Mason727011e2010-08-06 13:21:20 -04005675 if (areas_overlap(src_off, dst_off, len))
5676 must_memmove = 1;
Sergei Trofimovich33872062011-04-11 21:52:52 +00005677 }
Chris Masond1310b22008-01-24 16:13:08 -05005678
Chris Mason727011e2010-08-06 13:21:20 -04005679 if (must_memmove)
5680 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5681 else
5682 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
Chris Masond1310b22008-01-24 16:13:08 -05005683}
5684
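/*
 * Copy @len bytes within one extent buffer from @src_offset to
 * @dst_offset.  copy_pages() falls back to memmove() when the two ranges
 * overlap inside the same page, but an overlapping copy with the
 * destination above the source should use memmove_extent_buffer() below,
 * which copies backwards.
 */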
5685void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5686 unsigned long src_offset, unsigned long len)
5687{
5688 size_t cur;
5689 size_t dst_off_in_page;
5690 size_t src_off_in_page;
5691 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5692 unsigned long dst_i;
5693 unsigned long src_i;
5694
5695 if (src_offset + len > dst->len) {
David Sterbaf14d1042015-10-08 11:37:06 +02005696 btrfs_err(dst->fs_info,
5697 "memmove bogus src_offset %lu move "
5698 "len %lu dst len %lu", src_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05005699 BUG_ON(1);
5700 }
5701 if (dst_offset + len > dst->len) {
David Sterbaf14d1042015-10-08 11:37:06 +02005702 btrfs_err(dst->fs_info,
5703 "memmove bogus dst_offset %lu move "
5704 "len %lu dst len %lu", dst_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05005705 BUG_ON(1);
5706 }
5707
Chris Masond3977122009-01-05 21:25:51 -05005708 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05005709 dst_off_in_page = (start_offset + dst_offset) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005710 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005711 src_off_in_page = (start_offset + src_offset) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005712 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005713
5714 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5715 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
5716
5717 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
5718 src_off_in_page));
5719 cur = min_t(unsigned long, cur,
5720 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
5721
David Sterbafb85fc92014-07-31 01:03:53 +02005722 copy_pages(dst->pages[dst_i], dst->pages[src_i],
Chris Masond1310b22008-01-24 16:13:08 -05005723 dst_off_in_page, src_off_in_page, cur);
5724
5725 src_offset += cur;
5726 dst_offset += cur;
5727 len -= cur;
5728 }
5729}
Chris Masond1310b22008-01-24 16:13:08 -05005730
5731void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5732 unsigned long src_offset, unsigned long len)
5733{
5734 size_t cur;
5735 size_t dst_off_in_page;
5736 size_t src_off_in_page;
5737 unsigned long dst_end = dst_offset + len - 1;
5738 unsigned long src_end = src_offset + len - 1;
5739 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5740 unsigned long dst_i;
5741 unsigned long src_i;
5742
5743 if (src_offset + len > dst->len) {
David Sterbaf14d1042015-10-08 11:37:06 +02005744 btrfs_err(dst->fs_info, "memmove bogus src_offset %lu move "
5745 "len %lu len %lu", src_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05005746 BUG_ON(1);
5747 }
5748 if (dst_offset + len > dst->len) {
David Sterbaf14d1042015-10-08 11:37:06 +02005749 btrfs_err(dst->fs_info, "memmove bogus dst_offset %lu move "
5750 "len %lu len %lu", dst_offset, len, dst->len);
Chris Masond1310b22008-01-24 16:13:08 -05005751 BUG_ON(1);
5752 }
Chris Mason727011e2010-08-06 13:21:20 -04005753 if (dst_offset < src_offset) {
Chris Masond1310b22008-01-24 16:13:08 -05005754 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5755 return;
5756 }
Chris Masond3977122009-01-05 21:25:51 -05005757 while (len > 0) {
Chris Masond1310b22008-01-24 16:13:08 -05005758 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
5759 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
5760
5761 dst_off_in_page = (start_offset + dst_end) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005762 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005763 src_off_in_page = (start_offset + src_end) &
Geert Uytterhoeven778746b2013-08-20 13:20:16 +02005764 (PAGE_CACHE_SIZE - 1);
Chris Masond1310b22008-01-24 16:13:08 -05005765
5766 cur = min_t(unsigned long, len, src_off_in_page + 1);
5767 cur = min(cur, dst_off_in_page + 1);
David Sterbafb85fc92014-07-31 01:03:53 +02005768 copy_pages(dst->pages[dst_i], dst->pages[src_i],
Chris Masond1310b22008-01-24 16:13:08 -05005769 dst_off_in_page - cur + 1,
5770 src_off_in_page - cur + 1, cur);
5771
5772 dst_end -= cur;
5773 src_end -= cur;
5774 len -= cur;
5775 }
5776}
Chris Mason6af118ce2008-07-22 11:18:07 -04005777
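/*
 * Called when the page cache wants to free a page backing an extent
 * buffer.  Returns 1 when the page can be released (no buffer attached,
 * or the buffer only held its tree reference and has now been freed) and
 * 0 when the buffer is still referenced or under IO.
 */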
David Sterbaf7a52a42013-04-26 14:56:29 +00005778int try_release_extent_buffer(struct page *page)
Miao Xie19fe0a82010-10-26 20:57:29 -04005779{
Chris Mason6af118ce2008-07-22 11:18:07 -04005780 struct extent_buffer *eb;
Miao Xie897ca6e92010-10-26 20:57:29 -04005781
Miao Xie19fe0a82010-10-26 20:57:29 -04005782 /*
Josef Bacik3083ee22012-03-09 16:01:49 -05005783 * We need to make sure nobody is attaching this page to an eb right
5784 * now.
Miao Xie19fe0a82010-10-26 20:57:29 -04005785 */
Josef Bacik3083ee22012-03-09 16:01:49 -05005786 spin_lock(&page->mapping->private_lock);
5787 if (!PagePrivate(page)) {
5788 spin_unlock(&page->mapping->private_lock);
5789 return 1;
Miao Xie19fe0a82010-10-26 20:57:29 -04005790 }
5791
Josef Bacik3083ee22012-03-09 16:01:49 -05005792 eb = (struct extent_buffer *)page->private;
5793 BUG_ON(!eb);
Miao Xie19fe0a82010-10-26 20:57:29 -04005794
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005795 /*
Josef Bacik3083ee22012-03-09 16:01:49 -05005796 * This is a little awful but should be ok; we need to make sure that
5797 * the eb doesn't disappear out from under us while we're looking at
5798 * this page.
5799 */
5800 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04005801 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
Josef Bacik3083ee22012-03-09 16:01:49 -05005802 spin_unlock(&eb->refs_lock);
5803 spin_unlock(&page->mapping->private_lock);
5804 return 0;
5805 }
5806 spin_unlock(&page->mapping->private_lock);
5807
Josef Bacik3083ee22012-03-09 16:01:49 -05005808 /*
5809 * If tree ref isn't set then we know the ref on this eb is a real ref,
5810 * so just return; this page will likely be freed soon anyway.
5811 */
5812 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5813 spin_unlock(&eb->refs_lock);
5814 return 0;
5815 }
Josef Bacik3083ee22012-03-09 16:01:49 -05005816
David Sterbaf7a52a42013-04-26 14:56:29 +00005817 return release_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04005818}