#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};
struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range;
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};

static noinline void flush_write_bio(void *data);
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	return btrfs_sb(tree->mapping->host->i_sb);
}

int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}

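/*
 * allocate a new extent_state struct from the slab cache and initialize
 * it with a single reference.  When LEAK_DEBUG is enabled the state is
 * also tracked on the global leak list.
 */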
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

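/*
 * insert 'node' into the rb-tree, keyed by 'offset' (callers pass the end
 * offset of the range).  Returns the existing node if one already covers
 * 'offset', or NULL once the new node has been linked and rebalanced.
 */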
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

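/*
 * search the tree for an entry that contains 'offset'.  If there is no
 * exact match, return NULL and fill in prev_ret/next_ret with the
 * entries closest to 'offset' on either side.
 */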
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

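/*
 * search for an entry containing 'offset'.  If there is no exact match,
 * return the next entry in the tree instead (what __etree_search reports
 * via prev_ret), which may be NULL if we ran off the end.
 */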
static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

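/* notify the fs-specific merge hook, if one is registered, that two
 * states are about to be merged
 */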
static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
}

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int *bits)
{
	struct rb_node *node;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

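/* return the state struct that immediately follows 'state' in the tree,
 * or NULL if 'state' is the last entry
 */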
static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    int *bits, int wake)
{
	struct extent_state *next;
	int bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

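/*
 * make sure we have a preallocated extent_state to work with; fall back
 * to a GFP_ATOMIC allocation when the caller didn't bring one.
 */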
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 * | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

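/*
 * helper that sleeps on a state's wait queue.  The tree lock is dropped
 * while we sleep and retaken before returning to the caller.
 */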
static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		cond_resched_lock(&tree->lock);
	}
out:
	spin_unlock(&tree->lock);
}

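/*
 * set some bits on a state struct, masking out the control bits in
 * EXTENT_CTLBITS and updating tree->dirty_bytes accounting when
 * EXTENT_DIRTY is newly set.
 */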
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits)
{
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

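/*
 * stash a reference to 'state' in *cached_ptr so a later call can skip
 * the tree search.  Only states with io or boundary bits set are cached.
 */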
static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

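/* drop our reference on a previously cached state and clear the cache */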
static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && (*cached_ptr)) {
		struct extent_state *state = *cached_ptr;
		*cached_ptr = NULL;
		free_extent_state(state);
	}
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 int bits, int exclusive_bits, u64 *failed_start,
		 struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end, &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   u64 *failed_start, struct extent_state **cached_state,
		   gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask);
}

/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @mask:	the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       int bits, int clear_bits, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		if (err)
			extent_io_tree_panic(tree, err);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits);
		state = clear_state_bit(tree, state, &clear_bits, 0);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			state = clear_state_bit(tree, state, &clear_bits, 0);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits);
		clear_state_bit(tree, prealloc, &clear_bits, 0);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
			      NULL, cached_state, mask);
}

int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
		      struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
			      cached_state, mask);
}

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  struct extent_state **cached_state, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}

/*
 * either insert or lock state struct between start and end; use mask to tell
 * us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached_state)
{
	int err;
	u64 failed_start;
	while (1) {
		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				       EXTENT_LOCKED, &failed_start,
				       cached_state, GFP_NOFS);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}

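/* lock the range [start, end] in the tree, waiting if it is already locked */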
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, 0, NULL);
}

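/*
 * try to lock the range [start, end] without blocking.  Returns 1 on
 * success and 0 if any part of the range was already locked, undoing
 * whatever portion we managed to lock first.
 */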
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			       &failed_start, NULL, GFP_NOFS);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

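/* clear EXTENT_LOCKED on the range and wake up any waiters */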
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				GFP_NOFS);
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set.  zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned; if something was found, 0 is returned.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	state = find_first_extent_bit_state(tree, start, bits);
	if (state) {
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
	spin_unlock(&tree->lock);
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
Chris Masonc8b97812008-10-29 14:49:59 -04001321static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001322 u64 *start, u64 *end, u64 max_bytes,
1323 struct extent_state **cached_state)
Chris Masond1310b22008-01-24 16:13:08 -05001324{
1325 struct rb_node *node;
1326 struct extent_state *state;
1327 u64 cur_start = *start;
1328 u64 found = 0;
1329 u64 total_bytes = 0;
1330
Chris Masoncad321a2008-12-17 14:51:42 -05001331 spin_lock(&tree->lock);
Chris Masonc8b97812008-10-29 14:49:59 -04001332
Chris Masond1310b22008-01-24 16:13:08 -05001333 /*
1334 * this search will find all the extents that end after
1335 * our range starts.
1336 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001337 node = tree_search(tree, cur_start);
Peter2b114d12008-04-01 11:21:40 -04001338 if (!node) {
Chris Mason3b951512008-04-17 11:29:12 -04001339 if (!found)
1340 *end = (u64)-1;
Chris Masond1310b22008-01-24 16:13:08 -05001341 goto out;
1342 }
1343
Chris Masond3977122009-01-05 21:25:51 -05001344 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001345 state = rb_entry(node, struct extent_state, rb_node);
Zheng Yan5b21f2e2008-09-26 10:05:38 -04001346 if (found && (state->start != cur_start ||
1347 (state->state & EXTENT_BOUNDARY))) {
Chris Masond1310b22008-01-24 16:13:08 -05001348 goto out;
1349 }
1350 if (!(state->state & EXTENT_DELALLOC)) {
1351 if (!found)
1352 *end = state->end;
1353 goto out;
1354 }
Josef Bacikc2a128d2010-02-02 21:19:11 +00001355 if (!found) {
Chris Masond1310b22008-01-24 16:13:08 -05001356 *start = state->start;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001357 *cached_state = state;
1358 atomic_inc(&state->refs);
1359 }
Chris Masond1310b22008-01-24 16:13:08 -05001360 found++;
1361 *end = state->end;
1362 cur_start = state->end + 1;
1363 node = rb_next(node);
1364 if (!node)
1365 break;
1366 total_bytes += state->end - state->start + 1;
1367 if (total_bytes >= max_bytes)
1368 break;
1369 }
1370out:
Chris Masoncad321a2008-12-17 14:51:42 -05001371 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001372 return found;
1373}
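
/*
 * Caller contract sketch: when a range is found, *cached_state comes
 * back holding an extra reference (the atomic_inc above), so the
 * caller must drop it, as find_lock_delalloc_range() below does:
 *
 *	found = find_delalloc_range(tree, &start, &end, max_bytes, &cached);
 *	...
 *	free_extent_state(cached);
 */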
1374
Jeff Mahoney143bede2012-03-01 14:56:26 +01001375static noinline void __unlock_for_delalloc(struct inode *inode,
1376 struct page *locked_page,
1377 u64 start, u64 end)
Chris Masonc8b97812008-10-29 14:49:59 -04001378{
1379 int ret;
1380 struct page *pages[16];
1381 unsigned long index = start >> PAGE_CACHE_SHIFT;
1382 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1383 unsigned long nr_pages = end_index - index + 1;
1384 int i;
1385
1386 if (index == locked_page->index && end_index == index)
Jeff Mahoney143bede2012-03-01 14:56:26 +01001387 return;
Chris Masonc8b97812008-10-29 14:49:59 -04001388
Chris Masond3977122009-01-05 21:25:51 -05001389 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001390 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001391 min_t(unsigned long, nr_pages,
1392 ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001393 for (i = 0; i < ret; i++) {
1394 if (pages[i] != locked_page)
1395 unlock_page(pages[i]);
1396 page_cache_release(pages[i]);
1397 }
1398 nr_pages -= ret;
1399 index += ret;
1400 cond_resched();
1401 }
Chris Masonc8b97812008-10-29 14:49:59 -04001402}
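
/*
 * Batching note (worked example): pages are looked up in chunks of
 * ARRAY_SIZE(pages) == 16, so unlocking a 64-page delalloc range costs
 * four find_get_pages_contig() calls instead of 64 single lookups.
 */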
1403
1404static noinline int lock_delalloc_pages(struct inode *inode,
1405 struct page *locked_page,
1406 u64 delalloc_start,
1407 u64 delalloc_end)
1408{
1409 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1410 unsigned long start_index = index;
1411 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1412 unsigned long pages_locked = 0;
1413 struct page *pages[16];
1414 unsigned long nrpages;
1415 int ret;
1416 int i;
1417
1418 /* the caller is responsible for locking the start index */
1419 if (index == locked_page->index && index == end_index)
1420 return 0;
1421
1422 /* skip the page at the start index */
1423 nrpages = end_index - index + 1;
Chris Masond3977122009-01-05 21:25:51 -05001424 while (nrpages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001425 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001426 min_t(unsigned long,
1427 nrpages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001428 if (ret == 0) {
1429 ret = -EAGAIN;
1430 goto done;
1431 }
1432 /* now we have an array of pages, lock them all */
1433 for (i = 0; i < ret; i++) {
1434 /*
1435 * the caller is taking responsibility for
1436 * locked_page
1437 */
Chris Mason771ed682008-11-06 22:02:51 -05001438 if (pages[i] != locked_page) {
Chris Masonc8b97812008-10-29 14:49:59 -04001439 lock_page(pages[i]);
Chris Masonf2b1c412008-11-10 07:31:30 -05001440 if (!PageDirty(pages[i]) ||
1441 pages[i]->mapping != inode->i_mapping) {
Chris Mason771ed682008-11-06 22:02:51 -05001442 ret = -EAGAIN;
1443 unlock_page(pages[i]);
1444 page_cache_release(pages[i]);
1445 goto done;
1446 }
1447 }
Chris Masonc8b97812008-10-29 14:49:59 -04001448 page_cache_release(pages[i]);
Chris Mason771ed682008-11-06 22:02:51 -05001449 pages_locked++;
Chris Masonc8b97812008-10-29 14:49:59 -04001450 }
Chris Masonc8b97812008-10-29 14:49:59 -04001451 nrpages -= ret;
1452 index += ret;
1453 cond_resched();
1454 }
1455 ret = 0;
1456done:
1457 if (ret && pages_locked) {
1458 __unlock_for_delalloc(inode, locked_page,
1459 delalloc_start,
1460 ((u64)(start_index + pages_locked - 1)) <<
1461 PAGE_CACHE_SHIFT);
1462 }
1463 return ret;
1464}
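
/*
 * Partial-failure sketch: if, say, 5 pages were locked before one was
 * found non-dirty or remapped, the cleanup above unlocks exactly those
 * 5 by converting (start_index + pages_locked - 1) back into a byte
 * offset with PAGE_CACHE_SHIFT before handing it to
 * __unlock_for_delalloc().
 */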
1465
1466/*
1467 * find a contiguous range of bytes in the file marked as delalloc, not
1468 * more than 'max_bytes'. start and end are used to return the range.
1469 *
1470 * 1 is returned if we find something, 0 if nothing was in the tree
1471 */
1472static noinline u64 find_lock_delalloc_range(struct inode *inode,
1473 struct extent_io_tree *tree,
1474 struct page *locked_page,
1475 u64 *start, u64 *end,
1476 u64 max_bytes)
1477{
1478 u64 delalloc_start;
1479 u64 delalloc_end;
1480 u64 found;
Chris Mason9655d292009-09-02 15:22:30 -04001481 struct extent_state *cached_state = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04001482 int ret;
1483 int loops = 0;
1484
1485again:
1486 /* step one, find a bunch of delalloc bytes starting at start */
1487 delalloc_start = *start;
1488 delalloc_end = 0;
1489 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
Josef Bacikc2a128d2010-02-02 21:19:11 +00001490 max_bytes, &cached_state);
Chris Mason70b99e62008-10-31 12:46:39 -04001491 if (!found || delalloc_end <= *start) {
Chris Masonc8b97812008-10-29 14:49:59 -04001492 *start = delalloc_start;
1493 *end = delalloc_end;
Josef Bacikc2a128d2010-02-02 21:19:11 +00001494 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001495 return found;
1496 }
1497
1498 /*
Chris Mason70b99e62008-10-31 12:46:39 -04001499 * start comes from the offset of locked_page. We have to lock
1500 * pages in order, so we can't process delalloc bytes before
1501 * locked_page
1502 */
Chris Masond3977122009-01-05 21:25:51 -05001503 if (delalloc_start < *start)
Chris Mason70b99e62008-10-31 12:46:39 -04001504 delalloc_start = *start;
Chris Mason70b99e62008-10-31 12:46:39 -04001505
1506 /*
Chris Masonc8b97812008-10-29 14:49:59 -04001507 * make sure to limit the number of pages we try to lock down
1508 * if we're looping.
1509 */
Chris Masond3977122009-01-05 21:25:51 -05001510 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
Chris Mason771ed682008-11-06 22:02:51 -05001511 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
Chris Masond3977122009-01-05 21:25:51 -05001512
Chris Masonc8b97812008-10-29 14:49:59 -04001513 /* step two, lock all the pages after the page that has start */
1514 ret = lock_delalloc_pages(inode, locked_page,
1515 delalloc_start, delalloc_end);
1516 if (ret == -EAGAIN) {
1517 /* some of the pages are gone, let's avoid looping by
1518 * shortening the size of the delalloc range we're searching
1519 */
Chris Mason9655d292009-09-02 15:22:30 -04001520 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001521 if (!loops) {
1522 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1523 max_bytes = PAGE_CACHE_SIZE - offset;
1524 loops = 1;
1525 goto again;
1526 } else {
1527 found = 0;
1528 goto out_failed;
1529 }
1530 }
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001531 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
Chris Masonc8b97812008-10-29 14:49:59 -04001532
1533 /* step three, lock the state bits for the whole range */
Jeff Mahoneyd0082372012-03-01 14:57:19 +01001534 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001535
1536 /* then test to make sure it is all still delalloc */
1537 ret = test_range_bit(tree, delalloc_start, delalloc_end,
Chris Mason9655d292009-09-02 15:22:30 -04001538 EXTENT_DELALLOC, 1, cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001539 if (!ret) {
Chris Mason9655d292009-09-02 15:22:30 -04001540 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1541 &cached_state, GFP_NOFS);
Chris Masonc8b97812008-10-29 14:49:59 -04001542 __unlock_for_delalloc(inode, locked_page,
1543 delalloc_start, delalloc_end);
1544 cond_resched();
1545 goto again;
1546 }
Chris Mason9655d292009-09-02 15:22:30 -04001547 free_extent_state(cached_state);
Chris Masonc8b97812008-10-29 14:49:59 -04001548 *start = delalloc_start;
1549 *end = delalloc_end;
1550out_failed:
1551 return found;
1552}
1553
1554int extent_clear_unlock_delalloc(struct inode *inode,
1555 struct extent_io_tree *tree,
1556 u64 start, u64 end, struct page *locked_page,
Chris Masona791e352009-10-08 11:27:10 -04001557 unsigned long op)
Chris Masonc8b97812008-10-29 14:49:59 -04001558{
1559 int ret;
1560 struct page *pages[16];
1561 unsigned long index = start >> PAGE_CACHE_SHIFT;
1562 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1563 unsigned long nr_pages = end_index - index + 1;
1564 int i;
Chris Mason771ed682008-11-06 22:02:51 -05001565 int clear_bits = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001566
Chris Masona791e352009-10-08 11:27:10 -04001567 if (op & EXTENT_CLEAR_UNLOCK)
Chris Mason771ed682008-11-06 22:02:51 -05001568 clear_bits |= EXTENT_LOCKED;
Chris Masona791e352009-10-08 11:27:10 -04001569 if (op & EXTENT_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001570 clear_bits |= EXTENT_DIRTY;
1571
Chris Masona791e352009-10-08 11:27:10 -04001572 if (op & EXTENT_CLEAR_DELALLOC)
Chris Mason771ed682008-11-06 22:02:51 -05001573 clear_bits |= EXTENT_DELALLOC;
1574
Chris Mason2c64c532009-09-02 15:04:12 -04001575 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
Josef Bacik32c00af2009-10-08 13:34:05 -04001576 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1577 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1578 EXTENT_SET_PRIVATE2)))
Chris Mason771ed682008-11-06 22:02:51 -05001579 return 0;
Chris Masonc8b97812008-10-29 14:49:59 -04001580
Chris Masond3977122009-01-05 21:25:51 -05001581 while (nr_pages > 0) {
Chris Masonc8b97812008-10-29 14:49:59 -04001582 ret = find_get_pages_contig(inode->i_mapping, index,
Chris Mason5b050f02008-11-11 09:34:41 -05001583 min_t(unsigned long,
1584 nr_pages, ARRAY_SIZE(pages)), pages);
Chris Masonc8b97812008-10-29 14:49:59 -04001585 for (i = 0; i < ret; i++) {
Chris Mason8b62b722009-09-02 16:53:46 -04001586
Chris Masona791e352009-10-08 11:27:10 -04001587 if (op & EXTENT_SET_PRIVATE2)
Chris Mason8b62b722009-09-02 16:53:46 -04001588 SetPagePrivate2(pages[i]);
1589
Chris Masonc8b97812008-10-29 14:49:59 -04001590 if (pages[i] == locked_page) {
1591 page_cache_release(pages[i]);
1592 continue;
1593 }
Chris Masona791e352009-10-08 11:27:10 -04001594 if (op & EXTENT_CLEAR_DIRTY)
Chris Masonc8b97812008-10-29 14:49:59 -04001595 clear_page_dirty_for_io(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001596 if (op & EXTENT_SET_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001597 set_page_writeback(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001598 if (op & EXTENT_END_WRITEBACK)
Chris Masonc8b97812008-10-29 14:49:59 -04001599 end_page_writeback(pages[i]);
Chris Masona791e352009-10-08 11:27:10 -04001600 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
Chris Mason771ed682008-11-06 22:02:51 -05001601 unlock_page(pages[i]);
Chris Masonc8b97812008-10-29 14:49:59 -04001602 page_cache_release(pages[i]);
1603 }
1604 nr_pages -= ret;
1605 index += ret;
1606 cond_resched();
1607 }
1608 return 0;
1609}
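
/*
 * Illustrative call (hypothetical flag combination): a delalloc writer
 * that wants the whole range released in one pass might combine the op
 * bits, e.g.:
 *
 *	extent_clear_unlock_delalloc(inode, tree, start, end, locked_page,
 *				     EXTENT_CLEAR_UNLOCK_PAGE |
 *				     EXTENT_CLEAR_UNLOCK |
 *				     EXTENT_CLEAR_DELALLOC |
 *				     EXTENT_CLEAR_DIRTY |
 *				     EXTENT_SET_WRITEBACK |
 *				     EXTENT_END_WRITEBACK);
 */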
Chris Masonc8b97812008-10-29 14:49:59 -04001610
Chris Masond352ac62008-09-29 15:18:18 -04001611/*
1612 * count the number of bytes in the tree that have the given bit(s)
1613 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1614 * cached. The total number found is returned.
1615 */
Chris Masond1310b22008-01-24 16:13:08 -05001616u64 count_range_bits(struct extent_io_tree *tree,
1617 u64 *start, u64 search_end, u64 max_bytes,
Chris Masonec29ed52011-02-23 16:23:20 -05001618 unsigned long bits, int contig)
Chris Masond1310b22008-01-24 16:13:08 -05001619{
1620 struct rb_node *node;
1621 struct extent_state *state;
1622 u64 cur_start = *start;
1623 u64 total_bytes = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05001624 u64 last = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001625 int found = 0;
1626
1627 if (search_end <= cur_start) {
Chris Masond1310b22008-01-24 16:13:08 -05001628 WARN_ON(1);
1629 return 0;
1630 }
1631
Chris Masoncad321a2008-12-17 14:51:42 -05001632 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001633 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1634 total_bytes = tree->dirty_bytes;
1635 goto out;
1636 }
1637 /*
1638 * this search will find all the extents that end after
1639 * our range starts.
1640 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001641 node = tree_search(tree, cur_start);
Chris Masond3977122009-01-05 21:25:51 -05001642 if (!node)
Chris Masond1310b22008-01-24 16:13:08 -05001643 goto out;
Chris Masond1310b22008-01-24 16:13:08 -05001644
Chris Masond3977122009-01-05 21:25:51 -05001645 while (1) {
Chris Masond1310b22008-01-24 16:13:08 -05001646 state = rb_entry(node, struct extent_state, rb_node);
1647 if (state->start > search_end)
1648 break;
Chris Masonec29ed52011-02-23 16:23:20 -05001649 if (contig && found && state->start > last + 1)
1650 break;
1651 if (state->end >= cur_start && (state->state & bits) == bits) {
Chris Masond1310b22008-01-24 16:13:08 -05001652 total_bytes += min(search_end, state->end) + 1 -
1653 max(cur_start, state->start);
1654 if (total_bytes >= max_bytes)
1655 break;
1656 if (!found) {
Josef Bacikaf60bed2011-05-04 11:11:17 -04001657 *start = max(cur_start, state->start);
Chris Masond1310b22008-01-24 16:13:08 -05001658 found = 1;
1659 }
Chris Masonec29ed52011-02-23 16:23:20 -05001660 last = state->end;
1661 } else if (contig && found) {
1662 break;
Chris Masond1310b22008-01-24 16:13:08 -05001663 }
1664 node = rb_next(node);
1665 if (!node)
1666 break;
1667 }
1668out:
Chris Masoncad321a2008-12-17 14:51:42 -05001669 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001670 return total_bytes;
1671}
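
/*
 * Fast-path example: asking for EXTENT_DIRTY starting at offset 0 is
 * answered from the cached tree->dirty_bytes without walking the
 * rbtree; clean_io_failure() below leans on this to cheaply test
 * whether the failure tree is empty:
 *
 *	u64 off = 0;
 *	u64 n = count_range_bits(tree, &off, (u64)-1, 1, EXTENT_DIRTY, 0);
 */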
Christoph Hellwigb2950862008-12-02 09:54:17 -05001672
Chris Masond352ac62008-09-29 15:18:18 -04001673/*
1674 * set the private field for a given byte offset in the tree. If there isn't
1675 * an extent_state there already, -ENOENT is returned.
1676 */
Chris Masond1310b22008-01-24 16:13:08 -05001677int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1678{
1679 struct rb_node *node;
1680 struct extent_state *state;
1681 int ret = 0;
1682
Chris Masoncad321a2008-12-17 14:51:42 -05001683 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001684 /*
1685 * this search will find all the extents that end after
1686 * our range starts.
1687 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001688 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001689 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001690 ret = -ENOENT;
1691 goto out;
1692 }
1693 state = rb_entry(node, struct extent_state, rb_node);
1694 if (state->start != start) {
1695 ret = -ENOENT;
1696 goto out;
1697 }
1698 state->private = private;
1699out:
Chris Masoncad321a2008-12-17 14:51:42 -05001700 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001701 return ret;
1702}
1703
1704int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1705{
1706 struct rb_node *node;
1707 struct extent_state *state;
1708 int ret = 0;
1709
Chris Masoncad321a2008-12-17 14:51:42 -05001710 spin_lock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001711 /*
1712 * this search will find all the extents that end after
1713 * our range starts.
1714 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001715 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001716 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001717 ret = -ENOENT;
1718 goto out;
1719 }
1720 state = rb_entry(node, struct extent_state, rb_node);
1721 if (state->start != start) {
1722 ret = -ENOENT;
1723 goto out;
1724 }
1725 *private = state->private;
1726out:
Chris Masoncad321a2008-12-17 14:51:42 -05001727 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001728 return ret;
1729}
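
/*
 * Pointer-stashing sketch: these two act as a small key/value store
 * keyed by byte offset. bio_readpage_error() below, for example, hides
 * a record pointer in the u64:
 *
 *	set_state_private(failure_tree, start, (u64)(unsigned long)failrec);
 *	...
 *	ret = get_state_private(failure_tree, start, &private);
 *	failrec = (struct io_failure_record *)(unsigned long)private;
 */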
1730
1731/*
1732 * searches a range in the state tree for a given mask.
Chris Mason70dec802008-01-29 09:59:12 -05001733 * If 'filled' == 1, this returns 1 only if the whole range is covered
Chris Masond1310b22008-01-24 16:13:08 -05001734 * by extents that have the bits set. Otherwise, 1 is returned if any bit in the
1735 * range is found set.
1736 */
1737int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
Chris Mason9655d292009-09-02 15:22:30 -04001738 int bits, int filled, struct extent_state *cached)
Chris Masond1310b22008-01-24 16:13:08 -05001739{
1740 struct extent_state *state = NULL;
1741 struct rb_node *node;
1742 int bitset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001743
Chris Masoncad321a2008-12-17 14:51:42 -05001744 spin_lock(&tree->lock);
Josef Bacikdf98b6e2011-06-20 14:53:48 -04001745 if (cached && cached->tree && cached->start <= start &&
1746 cached->end > start)
Chris Mason9655d292009-09-02 15:22:30 -04001747 node = &cached->rb_node;
1748 else
1749 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05001750 while (node && start <= end) {
1751 state = rb_entry(node, struct extent_state, rb_node);
1752
1753 if (filled && state->start > start) {
1754 bitset = 0;
1755 break;
1756 }
1757
1758 if (state->start > end)
1759 break;
1760
1761 if (state->state & bits) {
1762 bitset = 1;
1763 if (!filled)
1764 break;
1765 } else if (filled) {
1766 bitset = 0;
1767 break;
1768 }
Chris Mason46562ce2009-09-23 20:23:16 -04001769
1770 if (state->end == (u64)-1)
1771 break;
1772
Chris Masond1310b22008-01-24 16:13:08 -05001773 start = state->end + 1;
1774 if (start > end)
1775 break;
1776 node = rb_next(node);
1777 if (!node) {
1778 if (filled)
1779 bitset = 0;
1780 break;
1781 }
1782 }
Chris Masoncad321a2008-12-17 14:51:42 -05001783 spin_unlock(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001784 return bitset;
1785}
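
/*
 * Semantics example: with filled == 1 the whole [start, end] range must
 * be covered by states carrying the bits, which is how
 * check_page_uptodate() below asks "is this entire page uptodate?":
 *
 *	test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
 *
 * With filled == 0, a single overlapping state with the bits is enough.
 */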
Chris Masond1310b22008-01-24 16:13:08 -05001786
1787/*
1788 * helper function to set a given page up to date if all the
1789 * extents in the tree for that page are up to date
1790 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01001791static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05001792{
1793 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1794 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001795 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001796 SetPageUptodate(page);
Chris Masond1310b22008-01-24 16:13:08 -05001797}
1798
1799/*
1800 * helper function to unlock a page if all the extents in the tree
1801 * for that page are unlocked
1802 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01001803static void check_page_locked(struct extent_io_tree *tree, struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05001804{
1805 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1806 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason9655d292009-09-02 15:22:30 -04001807 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
Chris Masond1310b22008-01-24 16:13:08 -05001808 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05001809}
1810
1811/*
1812 * helper function to end page writeback; the per-extent writeback
1813 * check is gone, so this now simply ends writeback on the page
1814 */
Jeff Mahoney143bede2012-03-01 14:56:26 +01001815static void check_page_writeback(struct extent_io_tree *tree,
1816 struct page *page)
Chris Masond1310b22008-01-24 16:13:08 -05001817{
Chris Mason1edbb732009-09-02 13:24:36 -04001818 end_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05001819}
1820
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001821/*
1822 * When IO fails, either with EIO or csum verification fails, we
1823 * try other mirrors that might have a good copy of the data. This
1824 * io_failure_record is used to record state as we go through all the
1825 * mirrors. If another mirror has good data, the page is set up to date
1826 * and things continue. If a good mirror can't be found, the original
1827 * bio end_io callback is called to indicate things have failed.
1828 */
1829struct io_failure_record {
1830 struct page *page;
1831 u64 start;
1832 u64 len;
1833 u64 logical;
1834 unsigned long bio_flags;
1835 int this_mirror;
1836 int failed_mirror;
1837 int in_validation;
1838};
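
/*
 * Lifecycle sketch (as implemented below): bio_readpage_error() creates
 * the record and stores it in the inode's io_failure_tree via
 * set_state_private(); on a later successful read of the range,
 * clean_io_failure() repairs the bad mirror if another copy exists and
 * frees the record through free_io_failure().
 */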
1839
1840static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1841 int did_repair)
1842{
1843 int ret;
1844 int err = 0;
1845 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1846
1847 set_state_private(failure_tree, rec->start, 0);
1848 ret = clear_extent_bits(failure_tree, rec->start,
1849 rec->start + rec->len - 1,
1850 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1851 if (ret)
1852 err = ret;
1853
1854 if (did_repair) {
1855 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1856 rec->start + rec->len - 1,
1857 EXTENT_DAMAGED, GFP_NOFS);
1858 if (ret && !err)
1859 err = ret;
1860 }
1861
1862 kfree(rec);
1863 return err;
1864}
1865
1866static void repair_io_failure_callback(struct bio *bio, int err)
1867{
1868 complete(bio->bi_private);
1869}
1870
1871/*
1872 * this bypasses the standard btrfs submit functions deliberately, as
1873 * the standard behavior is to write all copies in a raid setup. here we only
1874 * want to write the one bad copy. so we do the mapping for ourselves and issue
1875 * submit_bio directly.
1876 * to avoid any synchronization issues, wait for the data after writing, which
1877 * actually prevents the read that triggered the error from finishing.
1878 * currently, there can be no more than two copies of every data bit. thus,
1879 * exactly one rewrite is required.
1880 */
1881int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1882 u64 length, u64 logical, struct page *page,
1883 int mirror_num)
1884{
1885 struct bio *bio;
1886 struct btrfs_device *dev;
1887 DECLARE_COMPLETION_ONSTACK(compl);
1888 u64 map_length = 0;
1889 u64 sector;
1890 struct btrfs_bio *bbio = NULL;
1891 int ret;
1892
1893 BUG_ON(!mirror_num);
1894
1895 bio = bio_alloc(GFP_NOFS, 1);
1896 if (!bio)
1897 return -EIO;
1898 bio->bi_private = &compl;
1899 bio->bi_end_io = repair_io_failure_callback;
1900 bio->bi_size = 0;
1901 map_length = length;
1902
1903 ret = btrfs_map_block(map_tree, WRITE, logical,
1904 &map_length, &bbio, mirror_num);
1905 if (ret) {
1906 bio_put(bio);
1907 return -EIO;
1908 }
1909 BUG_ON(mirror_num != bbio->mirror_num);
1910 sector = bbio->stripes[mirror_num-1].physical >> 9;
1911 bio->bi_sector = sector;
1912 dev = bbio->stripes[mirror_num-1].dev;
1913 kfree(bbio);
1914 if (!dev || !dev->bdev || !dev->writeable) {
1915 bio_put(bio);
1916 return -EIO;
1917 }
1918 bio->bi_bdev = dev->bdev;
1919 bio_add_page(bio, page, length, start-page_offset(page));
Stefan Behrens21adbd52011-11-09 13:44:05 +01001920 btrfsic_submit_bio(WRITE_SYNC, bio);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001921 wait_for_completion(&compl);
1922
1923 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1924 /* try to remap that extent elsewhere? */
1925 bio_put(bio);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001926 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001927 return -EIO;
1928 }
1929
Anand Jaind5b025d2012-07-02 22:05:21 -06001930 printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
Josef Bacik606686e2012-06-04 14:03:51 -04001931 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
1932 start, rcu_str_deref(dev->name), sector);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001933
1934 bio_put(bio);
1935 return 0;
1936}
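
/*
 * Mapping note (worked example): btrfs_map_block() fills bbio with one
 * stripe per mirror; stripe [mirror_num - 1] supplies both the target
 * device and the physical byte offset, which >> 9 converts into the
 * 512-byte sector programmed into the bio.
 */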
1937
Josef Bacikea466792012-03-26 21:57:36 -04001938int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1939 int mirror_num)
1940{
1941 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1942 u64 start = eb->start;
1943 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
Chris Masond95603b2012-04-12 15:55:15 -04001944 int ret = 0;
Josef Bacikea466792012-03-26 21:57:36 -04001945
1946 for (i = 0; i < num_pages; i++) {
1947 struct page *p = extent_buffer_page(eb, i);
1948 ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1949 start, p, mirror_num);
1950 if (ret)
1951 break;
1952 start += PAGE_CACHE_SIZE;
1953 }
1954
1955 return ret;
1956}
1957
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02001958/*
1959 * each time an IO finishes, we do a fast check in the IO failure tree
1960 * to see if we need to process or clean up an io_failure_record
1961 */
1962static int clean_io_failure(u64 start, struct page *page)
1963{
1964 u64 private;
1965 u64 private_failure;
1966 struct io_failure_record *failrec;
1967 struct btrfs_mapping_tree *map_tree;
1968 struct extent_state *state;
1969 int num_copies;
1970 int did_repair = 0;
1971 int ret;
1972 struct inode *inode = page->mapping->host;
1973
1974 private = 0;
1975 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1976 (u64)-1, 1, EXTENT_DIRTY, 0);
1977 if (!ret)
1978 return 0;
1979
1980 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1981 &private_failure);
1982 if (ret)
1983 return 0;
1984
1985 failrec = (struct io_failure_record *)(unsigned long) private_failure;
1986 BUG_ON(!failrec->this_mirror);
1987
1988 if (failrec->in_validation) {
1989 /* there was no real error, just free the record */
1990 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1991 failrec->start);
1992 did_repair = 1;
1993 goto out;
1994 }
1995
1996 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1997 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1998 failrec->start,
1999 EXTENT_LOCKED);
2000 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2001
2002 if (state && state->start == failrec->start) {
2003 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
2004 num_copies = btrfs_num_copies(map_tree, failrec->logical,
2005 failrec->len);
2006 if (num_copies > 1) {
2007 ret = repair_io_failure(map_tree, start, failrec->len,
2008 failrec->logical, page,
2009 failrec->failed_mirror);
2010 did_repair = !ret;
2011 }
2012 }
2013
2014out:
2015 if (!ret)
2016 ret = free_io_failure(inode, failrec, did_repair);
2017
2018 return ret;
2019}
2020
2021/*
2022 * this is a generic handler for readpage errors (default
2023 * readpage_io_failed_hook). if other copies exist, read those and write back
2024 * good data to the failed position. does not investigate remapping the
2025 * failed extent elsewhere, hoping the device will be smart enough to do this as
2026 * needed
2027 */
2028
2029static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2030 u64 start, u64 end, int failed_mirror,
2031 struct extent_state *state)
2032{
2033 struct io_failure_record *failrec = NULL;
2034 u64 private;
2035 struct extent_map *em;
2036 struct inode *inode = page->mapping->host;
2037 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2038 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2039 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2040 struct bio *bio;
2041 int num_copies;
2042 int ret;
2043 int read_mode;
2044 u64 logical;
2045
2046 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2047
2048 ret = get_state_private(failure_tree, start, &private);
2049 if (ret) {
2050 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2051 if (!failrec)
2052 return -ENOMEM;
2053 failrec->start = start;
2054 failrec->len = end - start + 1;
2055 failrec->this_mirror = 0;
2056 failrec->bio_flags = 0;
2057 failrec->in_validation = 0;
2058
2059 read_lock(&em_tree->lock);
2060 em = lookup_extent_mapping(em_tree, start, failrec->len);
2061 if (!em) {
2062 read_unlock(&em_tree->lock);
2063 kfree(failrec);
2064 return -EIO;
2065 }
2066
2067 if (em->start > start || em->start + em->len < start) {
2068 free_extent_map(em);
2069 em = NULL;
2070 }
2071 read_unlock(&em_tree->lock);
2072
2073 if (!em || IS_ERR(em)) {
2074 kfree(failrec);
2075 return -EIO;
2076 }
2077 logical = start - em->start;
2078 logical = em->block_start + logical;
2079 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2080 logical = em->block_start;
2081 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2082 extent_set_compress_type(&failrec->bio_flags,
2083 em->compress_type);
2084 }
2085 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2086 "len=%llu\n", logical, start, failrec->len);
2087 failrec->logical = logical;
2088 free_extent_map(em);
2089
2090 /* set the bits in the private failure tree */
2091 ret = set_extent_bits(failure_tree, start, end,
2092 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2093 if (ret >= 0)
2094 ret = set_state_private(failure_tree, start,
2095 (u64)(unsigned long)failrec);
2096 /* set the bits in the inode's tree */
2097 if (ret >= 0)
2098 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2099 GFP_NOFS);
2100 if (ret < 0) {
2101 kfree(failrec);
2102 return ret;
2103 }
2104 } else {
2105 failrec = (struct io_failure_record *)(unsigned long)private;
2106 pr_debug("bio_readpage_error: (found) logical=%llu, "
2107 "start=%llu, len=%llu, validation=%d\n",
2108 failrec->logical, failrec->start, failrec->len,
2109 failrec->in_validation);
2110 /*
2111 * when data can be on disk more than twice, add to failrec here
2112 * (e.g. with a list for failed_mirror) to make
2113 * clean_io_failure() clean all those errors at once.
2114 */
2115 }
2116 num_copies = btrfs_num_copies(
2117 &BTRFS_I(inode)->root->fs_info->mapping_tree,
2118 failrec->logical, failrec->len);
2119 if (num_copies == 1) {
2120 /*
2121 * we only have a single copy of the data, so don't bother with
2122 * all the retry and error correction code that follows. no
2123 * matter what the error is, it is very likely to persist.
2124 */
2125 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2126 "state=%p, num_copies=%d, next_mirror %d, "
2127 "failed_mirror %d\n", state, num_copies,
2128 failrec->this_mirror, failed_mirror);
2129 free_io_failure(inode, failrec, 0);
2130 return -EIO;
2131 }
2132
2133 if (!state) {
2134 spin_lock(&tree->lock);
2135 state = find_first_extent_bit_state(tree, failrec->start,
2136 EXTENT_LOCKED);
2137 if (state && state->start != failrec->start)
2138 state = NULL;
2139 spin_unlock(&tree->lock);
2140 }
2141
2142 /*
2143 * there are two goals:
2144 * a) deliver good data to the caller
2145 * b) correct the bad sectors on disk
2146 */
2147 if (failed_bio->bi_vcnt > 1) {
2148 /*
2149 * to fulfill b), we need to know the exact failing sectors, as
2150 * we don't want to rewrite any more than the failed ones. thus,
2151 * we need separate read requests for the failed bio
2152 *
2153 * if the following BUG_ON triggers, our validation request got
2154 * merged. we need separate requests for our algorithm to work.
2155 */
2156 BUG_ON(failrec->in_validation);
2157 failrec->in_validation = 1;
2158 failrec->this_mirror = failed_mirror;
2159 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2160 } else {
2161 /*
2162 * we're ready to fulfill a) and b) alongside. get a good copy
2163 * of the failed sector and if we succeed, we have setup
2164 * everything for repair_io_failure to do the rest for us.
2165 */
2166 if (failrec->in_validation) {
2167 BUG_ON(failrec->this_mirror != failed_mirror);
2168 failrec->in_validation = 0;
2169 failrec->this_mirror = 0;
2170 }
2171 failrec->failed_mirror = failed_mirror;
2172 failrec->this_mirror++;
2173 if (failrec->this_mirror == failed_mirror)
2174 failrec->this_mirror++;
2175 read_mode = READ_SYNC;
2176 }
2177
2178 if (!state || failrec->this_mirror > num_copies) {
2179 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2180 "next_mirror %d, failed_mirror %d\n", state,
2181 num_copies, failrec->this_mirror, failed_mirror);
2182 free_io_failure(inode, failrec, 0);
2183 return -EIO;
2184 }
2185
2186 bio = bio_alloc(GFP_NOFS, 1);
Tsutomu Itohe627ee72012-04-12 16:03:56 -04002187 if (!bio) {
2188 free_io_failure(inode, failrec, 0);
2189 return -EIO;
2190 }
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002191 bio->bi_private = state;
2192 bio->bi_end_io = failed_bio->bi_end_io;
2193 bio->bi_sector = failrec->logical >> 9;
2194 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2195 bio->bi_size = 0;
2196
2197 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2198
2199 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2200 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2201 failrec->this_mirror, num_copies, failrec->in_validation);
2202
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002203 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2204 failrec->this_mirror,
2205 failrec->bio_flags, 0);
2206 return ret;
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002207}
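
/*
 * Mirror-rotation worked example (assuming num_copies == 2 and
 * failed_mirror == 1): this_mirror starts at 0, is incremented to 1,
 * collides with failed_mirror and is bumped again to 2, so the retry
 * read is issued against the other copy.
 */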
2208
Chris Masond1310b22008-01-24 16:13:08 -05002209/* lots and lots of room for performance fixes in the end_bio funcs */
2210
Jeff Mahoney87826df2012-02-15 16:23:57 +01002211int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2212{
2213 int uptodate = (err == 0);
2214 struct extent_io_tree *tree;
2215 int ret;
2216
2217 tree = &BTRFS_I(page->mapping->host)->io_tree;
2218
2219 if (tree->ops && tree->ops->writepage_end_io_hook) {
2220 ret = tree->ops->writepage_end_io_hook(page, start,
2221 end, NULL, uptodate);
2222 if (ret)
2223 uptodate = 0;
2224 }
2225
Jeff Mahoney87826df2012-02-15 16:23:57 +01002226 if (!uptodate) {
Jeff Mahoney87826df2012-02-15 16:23:57 +01002227 ClearPageUptodate(page);
2228 SetPageError(page);
2229 }
2230 return 0;
2231}
2232
Chris Masond1310b22008-01-24 16:13:08 -05002233/*
2234 * after a writepage IO is done, we need to:
2235 * clear the uptodate bits on error
2236 * clear the writeback bits in the extent tree for this IO
2237 * end_page_writeback if the page has no more pending IO
2238 *
2239 * Scheduling is not allowed, so the extent state tree is expected
2240 * to have one and only one object corresponding to this IO.
2241 */
Chris Masond1310b22008-01-24 16:13:08 -05002242static void end_bio_extent_writepage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05002243{
Chris Masond1310b22008-01-24 16:13:08 -05002244 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
David Woodhouse902b22f2008-08-20 08:51:49 -04002245 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05002246 u64 start;
2247 u64 end;
2248 int whole_page;
2249
Chris Masond1310b22008-01-24 16:13:08 -05002250 do {
2251 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04002252 tree = &BTRFS_I(page->mapping->host)->io_tree;
2253
Chris Masond1310b22008-01-24 16:13:08 -05002254 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2255 bvec->bv_offset;
2256 end = start + bvec->bv_len - 1;
2257
2258 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2259 whole_page = 1;
2260 else
2261 whole_page = 0;
2262
2263 if (--bvec >= bio->bi_io_vec)
2264 prefetchw(&bvec->bv_page->flags);
Chris Mason1259ab72008-05-12 13:39:03 -04002265
Jeff Mahoney87826df2012-02-15 16:23:57 +01002266 if (end_extent_writepage(page, err, start, end))
2267 continue;
Chris Mason70dec802008-01-29 09:59:12 -05002268
Chris Masond1310b22008-01-24 16:13:08 -05002269 if (whole_page)
2270 end_page_writeback(page);
2271 else
2272 check_page_writeback(tree, page);
Chris Masond1310b22008-01-24 16:13:08 -05002273 } while (bvec >= bio->bi_io_vec);
Chris Mason2b1f55b2008-09-24 11:48:04 -04002274
Chris Masond1310b22008-01-24 16:13:08 -05002275 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002276}
2277
2278/*
2279 * after a readpage IO is done, we need to:
2280 * clear the uptodate bits on error
2281 * set the uptodate bits if things worked
2282 * set the page up to date if all extents in the tree are uptodate
2283 * clear the lock bit in the extent tree
2284 * unlock the page if there are no other extents locked for it
2285 *
2286 * Scheduling is not allowed, so the extent state tree is expected
2287 * to have one and only one object corresponding to this IO.
2288 */
Chris Masond1310b22008-01-24 16:13:08 -05002289static void end_bio_extent_readpage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05002290{
2291 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Mason4125bf72010-02-03 18:18:45 +00002292 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2293 struct bio_vec *bvec = bio->bi_io_vec;
David Woodhouse902b22f2008-08-20 08:51:49 -04002294 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05002295 u64 start;
2296 u64 end;
2297 int whole_page;
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002298 int mirror;
Chris Masond1310b22008-01-24 16:13:08 -05002299 int ret;
2300
Chris Masond20f7042008-12-08 16:58:54 -05002301 if (err)
2302 uptodate = 0;
2303
Chris Masond1310b22008-01-24 16:13:08 -05002304 do {
2305 struct page *page = bvec->bv_page;
Arne Jansen507903b2011-04-06 10:02:20 +00002306 struct extent_state *cached = NULL;
2307 struct extent_state *state;
2308
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002309 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2310 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2311 (long int)bio->bi_bdev);
David Woodhouse902b22f2008-08-20 08:51:49 -04002312 tree = &BTRFS_I(page->mapping->host)->io_tree;
2313
Chris Masond1310b22008-01-24 16:13:08 -05002314 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2315 bvec->bv_offset;
2316 end = start + bvec->bv_len - 1;
2317
2318 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2319 whole_page = 1;
2320 else
2321 whole_page = 0;
2322
Chris Mason4125bf72010-02-03 18:18:45 +00002323 if (++bvec <= bvec_end)
Chris Masond1310b22008-01-24 16:13:08 -05002324 prefetchw(&bvec->bv_page->flags);
2325
Arne Jansen507903b2011-04-06 10:02:20 +00002326 spin_lock(&tree->lock);
Chris Mason0d399202011-04-16 06:55:39 -04002327 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
Chris Mason109b36a2011-04-12 13:57:39 -04002328 if (state && state->start == start) {
Arne Jansen507903b2011-04-06 10:02:20 +00002329 /*
2330 * take a reference on the state, unlock will drop
2331 * the ref
2332 */
2333 cache_state(state, &cached);
2334 }
2335 spin_unlock(&tree->lock);
2336
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002337 mirror = (int)(unsigned long)bio->bi_bdev;
Chris Masond1310b22008-01-24 16:13:08 -05002338 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
Chris Mason70dec802008-01-29 09:59:12 -05002339 ret = tree->ops->readpage_end_io_hook(page, start, end,
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002340 state, mirror);
Stefan Behrens5ee08442012-08-27 08:30:03 -06002341 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05002342 uptodate = 0;
Stefan Behrens5ee08442012-08-27 08:30:03 -06002343 else
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002344 clean_io_failure(start, page);
Chris Masond1310b22008-01-24 16:13:08 -05002345 }
Josef Bacikea466792012-03-26 21:57:36 -04002346
Josef Bacikea466792012-03-26 21:57:36 -04002347 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002348 ret = tree->ops->readpage_io_failed_hook(page, mirror);
Josef Bacikea466792012-03-26 21:57:36 -04002349 if (!ret && !err &&
2350 test_bit(BIO_UPTODATE, &bio->bi_flags))
2351 uptodate = 1;
2352 } else if (!uptodate) {
Jan Schmidtf4a8e652011-12-01 09:30:36 -05002353 /*
2354 * The generic bio_readpage_error handles errors the
2355 * following way: If possible, new read requests are
2356 * created and submitted and will end up in
2357 * end_bio_extent_readpage as well (if we're lucky, not
2358 * in the !uptodate case). In that case it returns 0 and
2359 * we just go on with the next page in our bio. If it
2360 * can't handle the error it will return -EIO and we
2361 * remain responsible for that page.
2362 */
Josef Bacik5cf1ab52012-04-16 09:42:26 -04002363 ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
Chris Mason7e383262008-04-09 16:28:12 -04002364 if (ret == 0) {
Chris Mason3b951512008-04-17 11:29:12 -04002365 uptodate =
2366 test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Masond20f7042008-12-08 16:58:54 -05002367 if (err)
2368 uptodate = 0;
Arne Jansen507903b2011-04-06 10:02:20 +00002369 uncache_state(&cached);
Chris Mason7e383262008-04-09 16:28:12 -04002370 continue;
2371 }
2372 }
Chris Mason70dec802008-01-29 09:59:12 -05002373
Josef Bacik0b32f4b2012-03-13 09:38:00 -04002374 if (uptodate && tree->track_uptodate) {
Arne Jansen507903b2011-04-06 10:02:20 +00002375 set_extent_uptodate(tree, start, end, &cached,
David Woodhouse902b22f2008-08-20 08:51:49 -04002376 GFP_ATOMIC);
Chris Mason771ed682008-11-06 22:02:51 -05002377 }
Arne Jansen507903b2011-04-06 10:02:20 +00002378 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
Chris Masond1310b22008-01-24 16:13:08 -05002379
Chris Mason70dec802008-01-29 09:59:12 -05002380 if (whole_page) {
2381 if (uptodate) {
2382 SetPageUptodate(page);
2383 } else {
2384 ClearPageUptodate(page);
2385 SetPageError(page);
2386 }
Chris Masond1310b22008-01-24 16:13:08 -05002387 unlock_page(page);
Chris Mason70dec802008-01-29 09:59:12 -05002388 } else {
2389 if (uptodate) {
2390 check_page_uptodate(tree, page);
2391 } else {
2392 ClearPageUptodate(page);
2393 SetPageError(page);
2394 }
Chris Masond1310b22008-01-24 16:13:08 -05002395 check_page_locked(tree, page);
Chris Mason70dec802008-01-29 09:59:12 -05002396 }
Chris Mason4125bf72010-02-03 18:18:45 +00002397 } while (bvec <= bvec_end);
Chris Masond1310b22008-01-24 16:13:08 -05002398
2399 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05002400}
2401
Miao Xie88f794e2010-11-22 03:02:55 +00002402struct bio *
2403btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2404 gfp_t gfp_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002405{
2406 struct bio *bio;
2407
2408 bio = bio_alloc(gfp_flags, nr_vecs);
2409
2410 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2411 while (!bio && (nr_vecs /= 2))
2412 bio = bio_alloc(gfp_flags, nr_vecs);
2413 }
2414
2415 if (bio) {
Chris Masone1c4b742008-04-22 13:26:46 -04002416 bio->bi_size = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002417 bio->bi_bdev = bdev;
2418 bio->bi_sector = first_sector;
2419 }
2420 return bio;
2421}
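
/*
 * Fallback example: under memory pressure (PF_MEMALLOC) the vector
 * count is halved until the allocation succeeds or reaches zero, so a
 * failed request for 16 vecs retries with 8, 4, 2 and finally 1.
 */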
2422
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002423/*
2424 * Since writes are async, they will only return -ENOMEM.
2425 * Reads can return the full range of I/O error conditions.
2426 */
Jeff Mahoney355808c2011-10-03 23:23:14 -04002427static int __must_check submit_one_bio(int rw, struct bio *bio,
2428 int mirror_num, unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002429{
Chris Masond1310b22008-01-24 16:13:08 -05002430 int ret = 0;
Chris Mason70dec802008-01-29 09:59:12 -05002431 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2432 struct page *page = bvec->bv_page;
2433 struct extent_io_tree *tree = bio->bi_private;
Chris Mason70dec802008-01-29 09:59:12 -05002434 u64 start;
Chris Mason70dec802008-01-29 09:59:12 -05002435
2436 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
Chris Mason70dec802008-01-29 09:59:12 -05002437
David Woodhouse902b22f2008-08-20 08:51:49 -04002438 bio->bi_private = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002439
2440 bio_get(bio);
2441
Chris Mason065631f2008-02-20 12:07:25 -05002442 if (tree->ops && tree->ops->submit_bio_hook)
liubo6b82ce82011-01-26 06:21:39 +00002443 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
Chris Masoneaf25d92010-05-25 09:48:28 -04002444 mirror_num, bio_flags, start);
Chris Mason0b86a832008-03-24 15:01:56 -04002445 else
Stefan Behrens21adbd52011-11-09 13:44:05 +01002446 btrfsic_submit_bio(rw, bio);
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02002447
Chris Masond1310b22008-01-24 16:13:08 -05002448 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2449 ret = -EOPNOTSUPP;
2450 bio_put(bio);
2451 return ret;
2452}
2453
Jeff Mahoney3444a972011-10-03 23:23:13 -04002454static int merge_bio(struct extent_io_tree *tree, struct page *page,
2455 unsigned long offset, size_t size, struct bio *bio,
2456 unsigned long bio_flags)
2457{
2458 int ret = 0;
2459 if (tree->ops && tree->ops->merge_bio_hook)
2460 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2461 bio_flags);
2462 BUG_ON(ret < 0);
2463 return ret;
2464
2465}
2466
Chris Masond1310b22008-01-24 16:13:08 -05002467static int submit_extent_page(int rw, struct extent_io_tree *tree,
2468 struct page *page, sector_t sector,
2469 size_t size, unsigned long offset,
2470 struct block_device *bdev,
2471 struct bio **bio_ret,
2472 unsigned long max_pages,
Chris Masonf1885912008-04-09 16:28:12 -04002473 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04002474 int mirror_num,
2475 unsigned long prev_bio_flags,
2476 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002477{
2478 int ret = 0;
2479 struct bio *bio;
2480 int nr;
Chris Masonc8b97812008-10-29 14:49:59 -04002481 int contig = 0;
2482 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2483 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
Chris Mason5b050f02008-11-11 09:34:41 -05002484 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
Chris Masond1310b22008-01-24 16:13:08 -05002485
2486 if (bio_ret && *bio_ret) {
2487 bio = *bio_ret;
Chris Masonc8b97812008-10-29 14:49:59 -04002488 if (old_compressed)
2489 contig = bio->bi_sector == sector;
2490 else
2491 contig = bio->bi_sector + (bio->bi_size >> 9) ==
2492 sector;
2493
2494 if (prev_bio_flags != bio_flags || !contig ||
Jeff Mahoney3444a972011-10-03 23:23:13 -04002495 merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
Chris Masonc8b97812008-10-29 14:49:59 -04002496 bio_add_page(bio, page, page_size, offset) < page_size) {
2497 ret = submit_one_bio(rw, bio, mirror_num,
2498 prev_bio_flags);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002499 if (ret < 0)
2500 return ret;
Chris Masond1310b22008-01-24 16:13:08 -05002501 bio = NULL;
2502 } else {
2503 return 0;
2504 }
2505 }
Chris Masonc8b97812008-10-29 14:49:59 -04002506 if (this_compressed)
2507 nr = BIO_MAX_PAGES;
2508 else
2509 nr = bio_get_nr_vecs(bdev);
2510
Miao Xie88f794e2010-11-22 03:02:55 +00002511 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
Tsutomu Itoh5df67082011-02-01 09:17:35 +00002512 if (!bio)
2513 return -ENOMEM;
Chris Mason70dec802008-01-29 09:59:12 -05002514
Chris Masonc8b97812008-10-29 14:49:59 -04002515 bio_add_page(bio, page, page_size, offset);
Chris Masond1310b22008-01-24 16:13:08 -05002516 bio->bi_end_io = end_io_func;
2517 bio->bi_private = tree;
Chris Mason70dec802008-01-29 09:59:12 -05002518
Chris Masond3977122009-01-05 21:25:51 -05002519 if (bio_ret)
Chris Masond1310b22008-01-24 16:13:08 -05002520 *bio_ret = bio;
Chris Masond3977122009-01-05 21:25:51 -05002521 else
Chris Masonc8b97812008-10-29 14:49:59 -04002522 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002523
2524 return ret;
2525}
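
/*
 * Contiguity rule, worked example: an uncompressed bio ending at sector
 * S (bi_sector + (bi_size >> 9) == S) accepts a new page mapped to S;
 * a compressed bio instead requires the page to map to the bio's
 * starting sector, because every page of a compressed extent is backed
 * by the same on-disk region (see __extent_read_full_page() below).
 */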
2526
Josef Bacik4f2de97a2012-03-07 16:20:05 -05002527void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2528{
2529 if (!PagePrivate(page)) {
2530 SetPagePrivate(page);
2531 page_cache_get(page);
2532 set_page_private(page, (unsigned long)eb);
2533 } else {
2534 WARN_ON(page->private != (unsigned long)eb);
2535 }
2536}
2537
Chris Masond1310b22008-01-24 16:13:08 -05002538void set_page_extent_mapped(struct page *page)
2539{
2540 if (!PagePrivate(page)) {
2541 SetPagePrivate(page);
Chris Masond1310b22008-01-24 16:13:08 -05002542 page_cache_get(page);
Chris Mason6af118ce2008-07-22 11:18:07 -04002543 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05002544 }
2545}
2546
Chris Masond1310b22008-01-24 16:13:08 -05002547/*
2548 * basic readpage implementation. Locked extent state structs are inserted
2549 * into the tree and are removed when the IO is done (by the end_io
2550 * handlers)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002551 * XXX JDM: This needs looking at to ensure proper page locking
Chris Masond1310b22008-01-24 16:13:08 -05002552 */
2553static int __extent_read_full_page(struct extent_io_tree *tree,
2554 struct page *page,
2555 get_extent_t *get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04002556 struct bio **bio, int mirror_num,
2557 unsigned long *bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05002558{
2559 struct inode *inode = page->mapping->host;
2560 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2561 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2562 u64 end;
2563 u64 cur = start;
2564 u64 extent_offset;
2565 u64 last_byte = i_size_read(inode);
2566 u64 block_start;
2567 u64 cur_end;
2568 sector_t sector;
2569 struct extent_map *em;
2570 struct block_device *bdev;
Josef Bacik11c65dc2010-05-23 11:07:21 -04002571 struct btrfs_ordered_extent *ordered;
Chris Masond1310b22008-01-24 16:13:08 -05002572 int ret;
2573 int nr = 0;
David Sterba306e16c2011-04-19 14:29:38 +02002574 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002575 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04002576 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05002577 size_t blocksize = inode->i_sb->s_blocksize;
Chris Masonc8b97812008-10-29 14:49:59 -04002578 unsigned long this_bio_flag = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002579
2580 set_page_extent_mapped(page);
2581
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002582 if (!PageUptodate(page)) {
2583 if (cleancache_get_page(page) == 0) {
2584 BUG_ON(blocksize != PAGE_SIZE);
2585 goto out;
2586 }
2587 }
2588
Chris Masond1310b22008-01-24 16:13:08 -05002589 end = page_end;
Josef Bacik11c65dc2010-05-23 11:07:21 -04002590 while (1) {
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002591 lock_extent(tree, start, end);
Josef Bacik11c65dc2010-05-23 11:07:21 -04002592 ordered = btrfs_lookup_ordered_extent(inode, start);
2593 if (!ordered)
2594 break;
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002595 unlock_extent(tree, start, end);
Josef Bacik11c65dc2010-05-23 11:07:21 -04002596 btrfs_start_ordered_extent(inode, ordered, 1);
2597 btrfs_put_ordered_extent(ordered);
2598 }
Chris Masond1310b22008-01-24 16:13:08 -05002599
Chris Masonc8b97812008-10-29 14:49:59 -04002600 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2601 char *userpage;
2602 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2603
2604 if (zero_offset) {
2605 iosize = PAGE_CACHE_SIZE - zero_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002606 userpage = kmap_atomic(page);
Chris Masonc8b97812008-10-29 14:49:59 -04002607 memset(userpage + zero_offset, 0, iosize);
2608 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002609 kunmap_atomic(userpage);
Chris Masonc8b97812008-10-29 14:49:59 -04002610 }
2611 }
Chris Masond1310b22008-01-24 16:13:08 -05002612 while (cur <= end) {
2613 if (cur >= last_byte) {
2614 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002615 struct extent_state *cached = NULL;
2616
David Sterba306e16c2011-04-19 14:29:38 +02002617 iosize = PAGE_CACHE_SIZE - pg_offset;
Cong Wang7ac687d2011-11-25 23:14:28 +08002618 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02002619 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002620 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002621 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05002622 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002623 &cached, GFP_NOFS);
2624 unlock_extent_cached(tree, cur, cur + iosize - 1,
2625 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002626 break;
2627 }
David Sterba306e16c2011-04-19 14:29:38 +02002628 em = get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05002629 end - cur + 1, 0);
David Sterbac7040052011-04-19 18:00:01 +02002630 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05002631 SetPageError(page);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002632 unlock_extent(tree, cur, end);
Chris Masond1310b22008-01-24 16:13:08 -05002633 break;
2634 }
Chris Masond1310b22008-01-24 16:13:08 -05002635 extent_offset = cur - em->start;
2636 BUG_ON(extent_map_end(em) <= cur);
2637 BUG_ON(end < cur);
2638
Li Zefan261507a02010-12-17 14:21:50 +08002639 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
Chris Masonc8b97812008-10-29 14:49:59 -04002640 this_bio_flag = EXTENT_BIO_COMPRESSED;
Li Zefan261507a02010-12-17 14:21:50 +08002641 extent_set_compress_type(&this_bio_flag,
2642 em->compress_type);
2643 }
Chris Masonc8b97812008-10-29 14:49:59 -04002644
Chris Masond1310b22008-01-24 16:13:08 -05002645 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2646 cur_end = min(extent_map_end(em) - 1, end);
2647 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002648 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2649 disk_io_size = em->block_len;
2650 sector = em->block_start >> 9;
2651 } else {
2652 sector = (em->block_start + extent_offset) >> 9;
2653 disk_io_size = iosize;
2654 }
Chris Masond1310b22008-01-24 16:13:08 -05002655 bdev = em->bdev;
2656 block_start = em->block_start;
Yan Zhengd899e052008-10-30 14:25:28 -04002657 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2658 block_start = EXTENT_MAP_HOLE;
Chris Masond1310b22008-01-24 16:13:08 -05002659 free_extent_map(em);
2660 em = NULL;
2661
2662 /* we've found a hole, just zero and go on */
2663 if (block_start == EXTENT_MAP_HOLE) {
2664 char *userpage;
Arne Jansen507903b2011-04-06 10:02:20 +00002665 struct extent_state *cached = NULL;
2666
Cong Wang7ac687d2011-11-25 23:14:28 +08002667 userpage = kmap_atomic(page);
David Sterba306e16c2011-04-19 14:29:38 +02002668 memset(userpage + pg_offset, 0, iosize);
Chris Masond1310b22008-01-24 16:13:08 -05002669 flush_dcache_page(page);
Cong Wang7ac687d2011-11-25 23:14:28 +08002670 kunmap_atomic(userpage);
Chris Masond1310b22008-01-24 16:13:08 -05002671
2672 set_extent_uptodate(tree, cur, cur + iosize - 1,
Arne Jansen507903b2011-04-06 10:02:20 +00002673 &cached, GFP_NOFS);
2674 unlock_extent_cached(tree, cur, cur + iosize - 1,
2675 &cached, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002676 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002677 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002678 continue;
2679 }
2680 /* the get_extent function already copied into the page */
Chris Mason9655d292009-09-02 15:22:30 -04002681 if (test_range_bit(tree, cur, cur_end,
2682 EXTENT_UPTODATE, 1, NULL)) {
Chris Masona1b32a52008-09-05 16:09:51 -04002683 check_page_uptodate(tree, page);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002684 unlock_extent(tree, cur, cur + iosize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05002685 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002686 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002687 continue;
2688 }
Chris Mason70dec802008-01-29 09:59:12 -05002689 /* we have an inline extent but it didn't get marked up
2690 * to date. Error out
2691 */
2692 if (block_start == EXTENT_MAP_INLINE) {
2693 SetPageError(page);
Jeff Mahoneyd0082372012-03-01 14:57:19 +01002694 unlock_extent(tree, cur, cur + iosize - 1);
Chris Mason70dec802008-01-29 09:59:12 -05002695 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002696 pg_offset += iosize;
Chris Mason70dec802008-01-29 09:59:12 -05002697 continue;
2698 }
Chris Masond1310b22008-01-24 16:13:08 -05002699
2700 ret = 0;
2701 if (tree->ops && tree->ops->readpage_io_hook) {
2702 ret = tree->ops->readpage_io_hook(page, cur,
2703 cur + iosize - 1);
2704 }
2705 if (!ret) {
Chris Mason89642222008-07-24 09:41:53 -04002706 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2707 pnr -= page->index;
Chris Masond1310b22008-01-24 16:13:08 -05002708 ret = submit_extent_page(READ, tree, page,
David Sterba306e16c2011-04-19 14:29:38 +02002709 sector, disk_io_size, pg_offset,
Chris Mason89642222008-07-24 09:41:53 -04002710 bdev, bio, pnr,
Chris Masonc8b97812008-10-29 14:49:59 -04002711 end_bio_extent_readpage, mirror_num,
2712 *bio_flags,
2713 this_bio_flag);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002714 BUG_ON(ret == -ENOMEM);
Chris Mason89642222008-07-24 09:41:53 -04002715 nr++;
Chris Masonc8b97812008-10-29 14:49:59 -04002716 *bio_flags = this_bio_flag;
Chris Masond1310b22008-01-24 16:13:08 -05002717 }
2718 if (ret)
2719 SetPageError(page);
2720 cur = cur + iosize;
David Sterba306e16c2011-04-19 14:29:38 +02002721 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002722 }
Dan Magenheimer90a887c2011-05-26 10:01:56 -06002723out:
Chris Masond1310b22008-01-24 16:13:08 -05002724 if (!nr) {
2725 if (!PageError(page))
2726 SetPageUptodate(page);
2727 unlock_page(page);
2728 }
2729 return 0;
2730}
2731
2732int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02002733 get_extent_t *get_extent, int mirror_num)
Chris Masond1310b22008-01-24 16:13:08 -05002734{
2735 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04002736 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002737 int ret;
2738
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02002739 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
Chris Masonc8b97812008-10-29 14:49:59 -04002740 &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002741 if (bio)
Jan Schmidt8ddc7d92011-06-13 20:02:58 +02002742 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002743 return ret;
2744}
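/*
 * Illustrative sketch (not part of this file): an address_space
 * ->readpage hook can sit directly on top of extent_read_full_page().
 * The io tree lookup and the use of btrfs_get_extent mirror what btrfs
 * does for data pages; treat the wiring as an assumption, not the one
 * true caller.
 */
static int example_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	/* mirror_num == 0 lets the lower layers pick a copy to read */
	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
}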
Chris Masond1310b22008-01-24 16:13:08 -05002745
Chris Mason11c83492009-04-20 15:50:09 -04002746static noinline void update_nr_written(struct page *page,
2747 struct writeback_control *wbc,
2748 unsigned long nr_written)
2749{
2750 wbc->nr_to_write -= nr_written;
2751 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2752 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2753 page->mapping->writeback_index = page->index + nr_written;
2754}
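/*
 * Worked example for the accounting above (illustrative numbers): in a
 * range-cyclic writeback where wbc->nr_to_write starts at 32, writing 8
 * pages drops nr_to_write to 24 and pushes writeback_index just past
 * the pages submitted, so the next cyclic pass resumes after them
 * instead of rescanning the same offsets.
 */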
2755
Chris Masond1310b22008-01-24 16:13:08 -05002756/*
2757 * the writepage semantics are similar to regular writepage. extent
2758 * records are inserted to lock ranges in the tree, and as dirty areas
2759 * are found, they are marked writeback. Then the lock bits are removed
2760 * and the end_io handler clears the writeback ranges
2761 */
2762static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2763 void *data)
2764{
2765 struct inode *inode = page->mapping->host;
2766 struct extent_page_data *epd = data;
2767 struct extent_io_tree *tree = epd->tree;
2768 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2769 u64 delalloc_start;
2770 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2771 u64 end;
2772 u64 cur = start;
2773 u64 extent_offset;
2774 u64 last_byte = i_size_read(inode);
2775 u64 block_start;
2776 u64 iosize;
2777 sector_t sector;
Chris Mason2c64c532009-09-02 15:04:12 -04002778 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002779 struct extent_map *em;
2780 struct block_device *bdev;
2781 int ret;
2782 int nr = 0;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002783 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002784 size_t blocksize;
2785 loff_t i_size = i_size_read(inode);
2786 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2787 u64 nr_delalloc;
2788 u64 delalloc_end;
Chris Masonc8b97812008-10-29 14:49:59 -04002789 int page_started;
2790 int compressed;
Chris Masonffbd5172009-04-20 15:50:09 -04002791 int write_flags;
Chris Mason771ed682008-11-06 22:02:51 -05002792 unsigned long nr_written = 0;
Josef Bacik9e487102011-08-01 12:08:18 -04002793 bool fill_delalloc = true;
Chris Masond1310b22008-01-24 16:13:08 -05002794
Chris Masonffbd5172009-04-20 15:50:09 -04002795 if (wbc->sync_mode == WB_SYNC_ALL)
Jens Axboe721a9602011-03-09 11:56:30 +01002796 write_flags = WRITE_SYNC;
Chris Masonffbd5172009-04-20 15:50:09 -04002797 else
2798 write_flags = WRITE;
2799
liubo1abe9b82011-03-24 11:18:59 +00002800 trace___extent_writepage(page, inode, wbc);
2801
Chris Masond1310b22008-01-24 16:13:08 -05002802 WARN_ON(!PageLocked(page));
Chris Masonbf0da8c2011-11-04 12:29:37 -04002803
2804 ClearPageError(page);
2805
Chris Mason7f3c74f2008-07-18 12:01:11 -04002806 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
Chris Mason211c17f2008-05-15 09:13:45 -04002807 if (page->index > end_index ||
Chris Mason7f3c74f2008-07-18 12:01:11 -04002808 (page->index == end_index && !pg_offset)) {
Chris Mason39be25c2008-11-10 11:50:50 -05002809 page->mapping->a_ops->invalidatepage(page, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002810 unlock_page(page);
2811 return 0;
2812 }
2813
2814 if (page->index == end_index) {
2815 char *userpage;
2816
Cong Wang7ac687d2011-11-25 23:14:28 +08002817 userpage = kmap_atomic(page);
Chris Mason7f3c74f2008-07-18 12:01:11 -04002818 memset(userpage + pg_offset, 0,
2819 PAGE_CACHE_SIZE - pg_offset);
Cong Wang7ac687d2011-11-25 23:14:28 +08002820 kunmap_atomic(userpage);
Chris Mason211c17f2008-05-15 09:13:45 -04002821 flush_dcache_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002822 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002823 pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002824
2825 set_page_extent_mapped(page);
2826
Josef Bacik9e487102011-08-01 12:08:18 -04002827 if (!tree->ops || !tree->ops->fill_delalloc)
2828 fill_delalloc = false;
2829
Chris Masond1310b22008-01-24 16:13:08 -05002830 delalloc_start = start;
2831 delalloc_end = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04002832 page_started = 0;
Josef Bacik9e487102011-08-01 12:08:18 -04002833 if (!epd->extent_locked && fill_delalloc) {
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002834 u64 delalloc_to_write = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002835 /*
2836 * make sure the wbc mapping index is at least updated
2837 * to this page.
2838 */
2839 update_nr_written(page, wbc, 0);
2840
Chris Masond3977122009-01-05 21:25:51 -05002841 while (delalloc_end < page_end) {
Chris Mason771ed682008-11-06 22:02:51 -05002842 nr_delalloc = find_lock_delalloc_range(inode, tree,
Chris Masonc8b97812008-10-29 14:49:59 -04002843 page,
2844 &delalloc_start,
Chris Masond1310b22008-01-24 16:13:08 -05002845 &delalloc_end,
2846 128 * 1024 * 1024);
Chris Mason771ed682008-11-06 22:02:51 -05002847 if (nr_delalloc == 0) {
2848 delalloc_start = delalloc_end + 1;
2849 continue;
2850 }
Tsutomu Itoh013bd4c2012-02-16 10:11:40 +09002851 ret = tree->ops->fill_delalloc(inode, page,
2852 delalloc_start,
2853 delalloc_end,
2854 &page_started,
2855 &nr_written);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01002856 /* File system has been set read-only */
2857 if (ret) {
2858 SetPageError(page);
2859 goto done;
2860 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002861 /*
2862 * delalloc_end is already one less than the total
2863 * length, so we don't subtract one from
2864 * PAGE_CACHE_SIZE
2865 */
2866 delalloc_to_write += (delalloc_end - delalloc_start +
2867 PAGE_CACHE_SIZE) >>
2868 PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05002869 delalloc_start = delalloc_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05002870 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04002871 if (wbc->nr_to_write < delalloc_to_write) {
2872 int thresh = 8192;
2873
2874 if (delalloc_to_write < thresh * 2)
2875 thresh = delalloc_to_write;
2876 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2877 thresh);
2878 }
Chris Masonc8b97812008-10-29 14:49:59 -04002879
Chris Mason771ed682008-11-06 22:02:51 -05002880 /* did the fill delalloc function already unlock and start
2881 * the IO?
2882 */
2883 if (page_started) {
2884 ret = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002885 /*
2886 * we've unlocked the page, so we can't update
2887 * the mapping's writeback index, just update
2888 * nr_to_write.
2889 */
2890 wbc->nr_to_write -= nr_written;
2891 goto done_unlocked;
Chris Mason771ed682008-11-06 22:02:51 -05002892 }
Chris Masonc8b97812008-10-29 14:49:59 -04002893 }
Chris Mason247e7432008-07-17 12:53:51 -04002894 if (tree->ops && tree->ops->writepage_start_hook) {
Chris Masonc8b97812008-10-29 14:49:59 -04002895 ret = tree->ops->writepage_start_hook(page, start,
2896 page_end);
Jeff Mahoney87826df2012-02-15 16:23:57 +01002897 if (ret) {
2898 /* Fixup worker will requeue */
2899 if (ret == -EBUSY)
2900 wbc->pages_skipped++;
2901 else
2902 redirty_page_for_writepage(wbc, page);
Chris Mason11c83492009-04-20 15:50:09 -04002903 update_nr_written(page, wbc, nr_written);
Chris Mason247e7432008-07-17 12:53:51 -04002904 unlock_page(page);
Chris Mason771ed682008-11-06 22:02:51 -05002905 ret = 0;
Chris Mason11c83492009-04-20 15:50:09 -04002906 goto done_unlocked;
Chris Mason247e7432008-07-17 12:53:51 -04002907 }
2908 }
2909
Chris Mason11c83492009-04-20 15:50:09 -04002910 /*
2911 * we don't want to touch the inode after unlocking the page,
2912 * so we update the mapping writeback index now
2913 */
2914 update_nr_written(page, wbc, nr_written + 1);
Chris Mason771ed682008-11-06 22:02:51 -05002915
Chris Masond1310b22008-01-24 16:13:08 -05002916 end = page_end;
Chris Masond1310b22008-01-24 16:13:08 -05002917 if (last_byte <= start) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04002918 if (tree->ops && tree->ops->writepage_end_io_hook)
2919 tree->ops->writepage_end_io_hook(page, start,
2920 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05002921 goto done;
2922 }
2923
Chris Masond1310b22008-01-24 16:13:08 -05002924 blocksize = inode->i_sb->s_blocksize;
2925
2926 while (cur <= end) {
2927 if (cur >= last_byte) {
Chris Masone6dcd2d2008-07-17 12:53:50 -04002928 if (tree->ops && tree->ops->writepage_end_io_hook)
2929 tree->ops->writepage_end_io_hook(page, cur,
2930 page_end, NULL, 1);
Chris Masond1310b22008-01-24 16:13:08 -05002931 break;
2932 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002933 em = epd->get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05002934 end - cur + 1, 1);
David Sterbac7040052011-04-19 18:00:01 +02002935 if (IS_ERR_OR_NULL(em)) {
Chris Masond1310b22008-01-24 16:13:08 -05002936 SetPageError(page);
2937 break;
2938 }
2939
2940 extent_offset = cur - em->start;
2941 BUG_ON(extent_map_end(em) <= cur);
2942 BUG_ON(end < cur);
2943 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2944 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2945 sector = (em->block_start + extent_offset) >> 9;
2946 bdev = em->bdev;
2947 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04002948 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05002949 free_extent_map(em);
2950 em = NULL;
2951
Chris Masonc8b97812008-10-29 14:49:59 -04002952 /*
2953 * compressed and inline extents are written through other
2954 * paths in the FS
2955 */
2956 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05002957 block_start == EXTENT_MAP_INLINE) {
Chris Masonc8b97812008-10-29 14:49:59 -04002958 /*
2959 * end_io notification does not happen here for
2960 * compressed extents
2961 */
2962 if (!compressed && tree->ops &&
2963 tree->ops->writepage_end_io_hook)
Chris Masone6dcd2d2008-07-17 12:53:50 -04002964 tree->ops->writepage_end_io_hook(page, cur,
2965 cur + iosize - 1,
2966 NULL, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002967 else if (compressed) {
2968 /* we don't want to end_page_writeback on
2969 * a compressed extent. this happens
2970 * elsewhere
2971 */
2972 nr++;
2973 }
2974
2975 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002976 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002977 continue;
2978 }
Chris Masond1310b22008-01-24 16:13:08 -05002979 /* leave this out until we have a page_mkwrite call */
2980 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
Chris Mason9655d292009-09-02 15:22:30 -04002981 EXTENT_DIRTY, 0, NULL)) {
Chris Masond1310b22008-01-24 16:13:08 -05002982 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002983 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002984 continue;
2985 }
Chris Masonc8b97812008-10-29 14:49:59 -04002986
Chris Masond1310b22008-01-24 16:13:08 -05002987 if (tree->ops && tree->ops->writepage_io_hook) {
2988 ret = tree->ops->writepage_io_hook(page, cur,
2989 cur + iosize - 1);
2990 } else {
2991 ret = 0;
2992 }
Chris Mason1259ab72008-05-12 13:39:03 -04002993 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05002994 SetPageError(page);
Chris Mason1259ab72008-05-12 13:39:03 -04002995 } else {
Chris Masond1310b22008-01-24 16:13:08 -05002996 unsigned long max_nr = end_index + 1;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002997
Chris Masond1310b22008-01-24 16:13:08 -05002998 set_range_writeback(tree, cur, cur + iosize - 1);
2999 if (!PageWriteback(page)) {
Chris Masond3977122009-01-05 21:25:51 -05003000 printk(KERN_ERR "btrfs warning page %lu not "
3001 "writeback, cur %llu end %llu\n",
3002 page->index, (unsigned long long)cur,
Chris Masond1310b22008-01-24 16:13:08 -05003003 (unsigned long long)end);
3004 }
3005
Chris Masonffbd5172009-04-20 15:50:09 -04003006 ret = submit_extent_page(write_flags, tree, page,
3007 sector, iosize, pg_offset,
3008 bdev, &epd->bio, max_nr,
Chris Masonc8b97812008-10-29 14:49:59 -04003009 end_bio_extent_writepage,
3010 0, 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05003011 if (ret)
3012 SetPageError(page);
3013 }
3014 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04003015 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05003016 nr++;
3017 }
3018done:
3019 if (nr == 0) {
3020 /* make sure the mapping tag for page dirty gets cleared */
3021 set_page_writeback(page);
3022 end_page_writeback(page);
3023 }
Chris Masond1310b22008-01-24 16:13:08 -05003024 unlock_page(page);
Chris Mason771ed682008-11-06 22:02:51 -05003025
Chris Mason11c83492009-04-20 15:50:09 -04003026done_unlocked:
3027
Chris Mason2c64c532009-09-02 15:04:12 -04003028 /* drop our reference on any cached states */
3029 free_extent_state(cached_state);
Chris Masond1310b22008-01-24 16:13:08 -05003030 return 0;
3031}
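/*
 * Worked example for the nr_to_write clamp above (illustrative
 * numbers): on 4K pages, 64K of delalloc gives delalloc_to_write == 16.
 * If the caller only asked for wbc->nr_to_write == 1, thresh is lowered
 * to 16 and nr_to_write is raised to min(16, 16) == 16, so the whole
 * delalloc range can go out in this pass instead of being cut off after
 * a single page.
 */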
3032
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003033static int eb_wait(void *word)
3034{
3035 io_schedule();
3036 return 0;
3037}
3038
3039static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3040{
3041 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3042 TASK_UNINTERRUPTIBLE);
3043}
3044
3045static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3046 struct btrfs_fs_info *fs_info,
3047 struct extent_page_data *epd)
3048{
3049 unsigned long i, num_pages;
3050 int flush = 0;
3051 int ret = 0;
3052
3053 if (!btrfs_try_tree_write_lock(eb)) {
3054 flush = 1;
3055 flush_write_bio(epd);
3056 btrfs_tree_lock(eb);
3057 }
3058
3059 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3060 btrfs_tree_unlock(eb);
3061 if (!epd->sync_io)
3062 return 0;
3063 if (!flush) {
3064 flush_write_bio(epd);
3065 flush = 1;
3066 }
Chris Masona098d8e2012-03-21 12:09:56 -04003067 while (1) {
3068 wait_on_extent_buffer_writeback(eb);
3069 btrfs_tree_lock(eb);
3070 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3071 break;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003072 btrfs_tree_unlock(eb);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003073 }
3074 }
3075
Josef Bacik51561ff2012-07-20 16:25:24 -04003076 /*
3077 * We need to do this to prevent races with callers that check if the
3078 * eb is under IO, since we can end up having no IO bits set for a
3079 * short period of time.
3080 */
3081 spin_lock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003082 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3083 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
Josef Bacik51561ff2012-07-20 16:25:24 -04003084 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003085 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3086 spin_lock(&fs_info->delalloc_lock);
3087 if (fs_info->dirty_metadata_bytes >= eb->len)
3088 fs_info->dirty_metadata_bytes -= eb->len;
3089 else
3090 WARN_ON(1);
3091 spin_unlock(&fs_info->delalloc_lock);
3092 ret = 1;
Josef Bacik51561ff2012-07-20 16:25:24 -04003093 } else {
3094 spin_unlock(&eb->refs_lock);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003095 }
3096
3097 btrfs_tree_unlock(eb);
3098
3099 if (!ret)
3100 return ret;
3101
3102 num_pages = num_extent_pages(eb->start, eb->len);
3103 for (i = 0; i < num_pages; i++) {
3104 struct page *p = extent_buffer_page(eb, i);
3105
3106 if (!trylock_page(p)) {
3107 if (!flush) {
3108 flush_write_bio(epd);
3109 flush = 1;
3110 }
3111 lock_page(p);
3112 }
3113 }
3114
3115 return ret;
3116}
3117
3118static void end_extent_buffer_writeback(struct extent_buffer *eb)
3119{
3120 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3121 smp_mb__after_clear_bit();
3122 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3123}
3124
3125static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3126{
3127 int uptodate = err == 0;
3128 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3129 struct extent_buffer *eb;
3130 int done;
3131
3132 do {
3133 struct page *page = bvec->bv_page;
3134
3135 bvec--;
3136 eb = (struct extent_buffer *)page->private;
3137 BUG_ON(!eb);
3138 done = atomic_dec_and_test(&eb->io_pages);
3139
3140 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3141 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3142 ClearPageUptodate(page);
3143 SetPageError(page);
3144 }
3145
3146 end_page_writeback(page);
3147
3148 if (!done)
3149 continue;
3150
3151 end_extent_buffer_writeback(eb);
3152 } while (bvec >= bio->bi_io_vec);
3153
3154 bio_put(bio);
3155
3156}
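/*
 * Worked example for the countdown above (illustrative sizes): a 16K eb
 * on 4K pages has write_one_eb() set eb->io_pages to 4. Each bvec
 * completion decrements it, and only the completion that takes it to
 * zero calls end_extent_buffer_writeback() and wakes anyone blocked in
 * wait_on_extent_buffer_writeback().
 */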
3157
3158static int write_one_eb(struct extent_buffer *eb,
3159 struct btrfs_fs_info *fs_info,
3160 struct writeback_control *wbc,
3161 struct extent_page_data *epd)
3162{
3163 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3164 u64 offset = eb->start;
3165 unsigned long i, num_pages;
3166 int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
Josef Bacikd7dbe9e2012-04-23 14:00:51 -04003167 int ret = 0;
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003168
3169 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3170 num_pages = num_extent_pages(eb->start, eb->len);
3171 atomic_set(&eb->io_pages, num_pages);
3172 for (i = 0; i < num_pages; i++) {
3173 struct page *p = extent_buffer_page(eb, i);
3174
3175 clear_page_dirty_for_io(p);
3176 set_page_writeback(p);
3177 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3178 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3179 -1, end_bio_extent_buffer_writepage,
3180 0, 0, 0);
3181 if (ret) {
3182 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3183 SetPageError(p);
3184 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3185 end_extent_buffer_writeback(eb);
3186 ret = -EIO;
3187 break;
3188 }
3189 offset += PAGE_CACHE_SIZE;
3190 update_nr_written(p, wbc, 1);
3191 unlock_page(p);
3192 }
3193
3194 if (unlikely(ret)) {
3195 for (; i < num_pages; i++) {
3196 struct page *p = extent_buffer_page(eb, i);
3197 unlock_page(p);
3198 }
3199 }
3200
3201 return ret;
3202}
3203
3204int btree_write_cache_pages(struct address_space *mapping,
3205 struct writeback_control *wbc)
3206{
3207 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3208 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3209 struct extent_buffer *eb, *prev_eb = NULL;
3210 struct extent_page_data epd = {
3211 .bio = NULL,
3212 .tree = tree,
3213 .extent_locked = 0,
3214 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3215 };
3216 int ret = 0;
3217 int done = 0;
3218 int nr_to_write_done = 0;
3219 struct pagevec pvec;
3220 int nr_pages;
3221 pgoff_t index;
3222 pgoff_t end; /* Inclusive */
3223 int scanned = 0;
3224 int tag;
3225
3226 pagevec_init(&pvec, 0);
3227 if (wbc->range_cyclic) {
3228 index = mapping->writeback_index; /* Start from prev offset */
3229 end = -1;
3230 } else {
3231 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3232 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3233 scanned = 1;
3234 }
3235 if (wbc->sync_mode == WB_SYNC_ALL)
3236 tag = PAGECACHE_TAG_TOWRITE;
3237 else
3238 tag = PAGECACHE_TAG_DIRTY;
3239retry:
3240 if (wbc->sync_mode == WB_SYNC_ALL)
3241 tag_pages_for_writeback(mapping, index, end);
3242 while (!done && !nr_to_write_done && (index <= end) &&
3243 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3244 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3245 unsigned i;
3246
3247 scanned = 1;
3248 for (i = 0; i < nr_pages; i++) {
3249 struct page *page = pvec.pages[i];
3250
3251 if (!PagePrivate(page))
3252 continue;
3253
3254 if (!wbc->range_cyclic && page->index > end) {
3255 done = 1;
3256 break;
3257 }
3258
3259 eb = (struct extent_buffer *)page->private;
3260 if (!eb) {
3261 WARN_ON(1);
3262 continue;
3263 }
3264
3265 if (eb == prev_eb)
3266 continue;
3267
3268 if (!atomic_inc_not_zero(&eb->refs)) {
3269 WARN_ON(1);
3270 continue;
3271 }
3272
3273 prev_eb = eb;
3274 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3275 if (!ret) {
3276 free_extent_buffer(eb);
3277 continue;
3278 }
3279
3280 ret = write_one_eb(eb, fs_info, wbc, &epd);
3281 if (ret) {
3282 done = 1;
3283 free_extent_buffer(eb);
3284 break;
3285 }
3286 free_extent_buffer(eb);
3287
3288 /*
3289 * the filesystem may choose to bump up nr_to_write.
3290 * We have to make sure to honor the new nr_to_write
3291 * at any time
3292 */
3293 nr_to_write_done = wbc->nr_to_write <= 0;
3294 }
3295 pagevec_release(&pvec);
3296 cond_resched();
3297 }
3298 if (!scanned && !done) {
3299 /*
3300 * We hit the last page and there is more work to be done: wrap
3301 * back to the start of the file
3302 */
3303 scanned = 1;
3304 index = 0;
3305 goto retry;
3306 }
3307 flush_write_bio(&epd);
3308 return ret;
3309}
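/*
 * Illustrative sketch (assumed wiring, not part of this file): the
 * btree inode's ->writepages can forward straight to
 * btree_write_cache_pages(); btrfs hooks it up through the btree
 * address_space operations, possibly with extra dirty-accounting
 * shortcuts in front.
 */
static int example_btree_writepages(struct address_space *mapping,
				    struct writeback_control *wbc)
{
	return btree_write_cache_pages(mapping, wbc);
}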
3310
Chris Masond1310b22008-01-24 16:13:08 -05003311/**
Chris Mason4bef0842008-09-08 11:18:08 -04003312 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
Chris Masond1310b22008-01-24 16:13:08 -05003313 * @mapping: address space structure to write
3314 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3315 * @writepage: function called for each page
3316 * @data: data passed to writepage function
3317 *
3318 * If a page is already under I/O, write_cache_pages() skips it, even
3319 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3320 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3321 * and msync() need to guarantee that all the data which was dirty at the time
3322 * the call was made get new I/O started against them. If wbc->sync_mode is
3323 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3324 * existing IO to complete.
3325 */
Christoph Hellwigb2950862008-12-02 09:54:17 -05003326static int extent_write_cache_pages(struct extent_io_tree *tree,
Chris Mason4bef0842008-09-08 11:18:08 -04003327 struct address_space *mapping,
3328 struct writeback_control *wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05003329 writepage_t writepage, void *data,
3330 void (*flush_fn)(void *))
Chris Masond1310b22008-01-24 16:13:08 -05003331{
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003332 struct inode *inode = mapping->host;
Chris Masond1310b22008-01-24 16:13:08 -05003333 int ret = 0;
3334 int done = 0;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003335 int nr_to_write_done = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003336 struct pagevec pvec;
3337 int nr_pages;
3338 pgoff_t index;
3339 pgoff_t end; /* Inclusive */
3340 int scanned = 0;
Josef Bacikf7aaa062011-07-15 21:26:38 +00003341 int tag;
Chris Masond1310b22008-01-24 16:13:08 -05003342
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003343 /*
3344 * We have to hold onto the inode so that ordered extents can do their
3345 * work when the IO finishes. The alternative to this is failing to add
3346 * an ordered extent if the igrab() fails there and that is a huge pain
3347 * to deal with, so instead just hold onto the inode throughout the
3348 * writepages operation. If it fails here we are freeing up the inode
3349 * anyway and we'd rather not waste our time writing out stuff that is
3350 * going to be truncated anyway.
3351 */
3352 if (!igrab(inode))
3353 return 0;
3354
Chris Masond1310b22008-01-24 16:13:08 -05003355 pagevec_init(&pvec, 0);
3356 if (wbc->range_cyclic) {
3357 index = mapping->writeback_index; /* Start from prev offset */
3358 end = -1;
3359 } else {
3360 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3361 end = wbc->range_end >> PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05003362 scanned = 1;
3363 }
Josef Bacikf7aaa062011-07-15 21:26:38 +00003364 if (wbc->sync_mode == WB_SYNC_ALL)
3365 tag = PAGECACHE_TAG_TOWRITE;
3366 else
3367 tag = PAGECACHE_TAG_DIRTY;
Chris Masond1310b22008-01-24 16:13:08 -05003368retry:
Josef Bacikf7aaa062011-07-15 21:26:38 +00003369 if (wbc->sync_mode == WB_SYNC_ALL)
3370 tag_pages_for_writeback(mapping, index, end);
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003371 while (!done && !nr_to_write_done && (index <= end) &&
Josef Bacikf7aaa062011-07-15 21:26:38 +00003372 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3373 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
Chris Masond1310b22008-01-24 16:13:08 -05003374 unsigned i;
3375
3376 scanned = 1;
3377 for (i = 0; i < nr_pages; i++) {
3378 struct page *page = pvec.pages[i];
3379
3380 /*
3381 * At this point we hold neither mapping->tree_lock nor
3382 * lock on the page itself: the page may be truncated or
3383 * invalidated (changing page->mapping to NULL), or even
3384 * swizzled back from swapper_space to tmpfs file
3385 * mapping
3386 */
Chris Mason01d658f2011-11-01 10:08:06 -04003387 if (tree->ops &&
3388 tree->ops->write_cache_pages_lock_hook) {
3389 tree->ops->write_cache_pages_lock_hook(page,
3390 data, flush_fn);
3391 } else {
3392 if (!trylock_page(page)) {
3393 flush_fn(data);
3394 lock_page(page);
3395 }
3396 }
Chris Masond1310b22008-01-24 16:13:08 -05003397
3398 if (unlikely(page->mapping != mapping)) {
3399 unlock_page(page);
3400 continue;
3401 }
3402
3403 if (!wbc->range_cyclic && page->index > end) {
3404 done = 1;
3405 unlock_page(page);
3406 continue;
3407 }
3408
Chris Masond2c3f4f2008-11-19 12:44:22 -05003409 if (wbc->sync_mode != WB_SYNC_NONE) {
Chris Mason0e6bd952008-11-20 10:46:35 -05003410 if (PageWriteback(page))
3411 flush_fn(data);
Chris Masond1310b22008-01-24 16:13:08 -05003412 wait_on_page_writeback(page);
Chris Masond2c3f4f2008-11-19 12:44:22 -05003413 }
Chris Masond1310b22008-01-24 16:13:08 -05003414
3415 if (PageWriteback(page) ||
3416 !clear_page_dirty_for_io(page)) {
3417 unlock_page(page);
3418 continue;
3419 }
3420
3421 ret = (*writepage)(page, wbc, data);
3422
3423 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3424 unlock_page(page);
3425 ret = 0;
3426 }
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003427 if (ret)
Chris Masond1310b22008-01-24 16:13:08 -05003428 done = 1;
Chris Masonf85d7d6c2009-09-18 16:03:16 -04003429
3430 /*
3431 * the filesystem may choose to bump up nr_to_write.
3432 * We have to make sure to honor the new nr_to_write
3433 * at any time
3434 */
3435 nr_to_write_done = wbc->nr_to_write <= 0;
Chris Masond1310b22008-01-24 16:13:08 -05003436 }
3437 pagevec_release(&pvec);
3438 cond_resched();
3439 }
3440 if (!scanned && !done) {
3441 /*
3442 * We hit the last page and there is more work to be done: wrap
3443 * back to the start of the file
3444 */
3445 scanned = 1;
3446 index = 0;
3447 goto retry;
3448 }
Josef Bacik7fd1a3f2012-06-27 17:18:41 -04003449 btrfs_add_delayed_iput(inode);
Chris Masond1310b22008-01-24 16:13:08 -05003450 return ret;
3451}
Chris Masond1310b22008-01-24 16:13:08 -05003452
Chris Masonffbd5172009-04-20 15:50:09 -04003453static void flush_epd_write_bio(struct extent_page_data *epd)
3454{
3455 if (epd->bio) {
Jeff Mahoney355808c2011-10-03 23:23:14 -04003456 int rw = WRITE;
3457 int ret;
3458
Chris Masonffbd5172009-04-20 15:50:09 -04003459 if (epd->sync_io)
Jeff Mahoney355808c2011-10-03 23:23:14 -04003460 rw = WRITE_SYNC;
3461
3462 ret = submit_one_bio(rw, epd->bio, 0, 0);
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003463 BUG_ON(ret < 0); /* -ENOMEM */
Chris Masonffbd5172009-04-20 15:50:09 -04003464 epd->bio = NULL;
3465 }
3466}
3467
Chris Masond2c3f4f2008-11-19 12:44:22 -05003468static noinline void flush_write_bio(void *data)
3469{
3470 struct extent_page_data *epd = data;
Chris Masonffbd5172009-04-20 15:50:09 -04003471 flush_epd_write_bio(epd);
Chris Masond2c3f4f2008-11-19 12:44:22 -05003472}
3473
Chris Masond1310b22008-01-24 16:13:08 -05003474int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3475 get_extent_t *get_extent,
3476 struct writeback_control *wbc)
3477{
3478 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05003479 struct extent_page_data epd = {
3480 .bio = NULL,
3481 .tree = tree,
3482 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05003483 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04003484 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05003485 };
Chris Masond1310b22008-01-24 16:13:08 -05003486
Chris Masond1310b22008-01-24 16:13:08 -05003487 ret = __extent_writepage(page, wbc, &epd);
3488
Chris Masonffbd5172009-04-20 15:50:09 -04003489 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05003490 return ret;
3491}
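/*
 * Illustrative sketch (assumed caller): a data ->writepage built on
 * extent_write_full_page(), with btrfs_get_extent standing in for the
 * filesystem's get_extent callback.
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}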
Chris Masond1310b22008-01-24 16:13:08 -05003492
Chris Mason771ed682008-11-06 22:02:51 -05003493int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3494 u64 start, u64 end, get_extent_t *get_extent,
3495 int mode)
3496{
3497 int ret = 0;
3498 struct address_space *mapping = inode->i_mapping;
3499 struct page *page;
3500 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3501 PAGE_CACHE_SHIFT;
3502
3503 struct extent_page_data epd = {
3504 .bio = NULL,
3505 .tree = tree,
3506 .get_extent = get_extent,
3507 .extent_locked = 1,
Chris Masonffbd5172009-04-20 15:50:09 -04003508 .sync_io = mode == WB_SYNC_ALL,
Chris Mason771ed682008-11-06 22:02:51 -05003509 };
3510 struct writeback_control wbc_writepages = {
Chris Mason771ed682008-11-06 22:02:51 -05003511 .sync_mode = mode,
Chris Mason771ed682008-11-06 22:02:51 -05003512 .nr_to_write = nr_pages * 2,
3513 .range_start = start,
3514 .range_end = end + 1,
3515 };
3516
Chris Masond3977122009-01-05 21:25:51 -05003517 while (start <= end) {
Chris Mason771ed682008-11-06 22:02:51 -05003518 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3519 if (clear_page_dirty_for_io(page))
3520 ret = __extent_writepage(page, &wbc_writepages, &epd);
3521 else {
3522 if (tree->ops && tree->ops->writepage_end_io_hook)
3523 tree->ops->writepage_end_io_hook(page, start,
3524 start + PAGE_CACHE_SIZE - 1,
3525 NULL, 1);
3526 unlock_page(page);
3527 }
3528 page_cache_release(page);
3529 start += PAGE_CACHE_SIZE;
3530 }
3531
Chris Masonffbd5172009-04-20 15:50:09 -04003532 flush_epd_write_bio(&epd);
Chris Mason771ed682008-11-06 22:02:51 -05003533 return ret;
3534}
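/*
 * Illustrative call (assumed values): synchronously flush one
 * already-locked 1MB window. Because the epd above sets
 * .extent_locked = 1, __extent_writepage skips the delalloc phase and
 * trusts the caller to hold the extent lock for [start, end].
 */
static int example_flush_locked_range(struct extent_io_tree *tree,
				      struct inode *inode, u64 start)
{
	u64 end = start + 1024 * 1024 - 1;	/* assumed 1MB window */

	return extent_write_locked_range(tree, inode, start, end,
					 btrfs_get_extent, WB_SYNC_ALL);
}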
Chris Masond1310b22008-01-24 16:13:08 -05003535
3536int extent_writepages(struct extent_io_tree *tree,
3537 struct address_space *mapping,
3538 get_extent_t *get_extent,
3539 struct writeback_control *wbc)
3540{
3541 int ret = 0;
3542 struct extent_page_data epd = {
3543 .bio = NULL,
3544 .tree = tree,
3545 .get_extent = get_extent,
Chris Mason771ed682008-11-06 22:02:51 -05003546 .extent_locked = 0,
Chris Masonffbd5172009-04-20 15:50:09 -04003547 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
Chris Masond1310b22008-01-24 16:13:08 -05003548 };
3549
Chris Mason4bef0842008-09-08 11:18:08 -04003550 ret = extent_write_cache_pages(tree, mapping, wbc,
Chris Masond2c3f4f2008-11-19 12:44:22 -05003551 __extent_writepage, &epd,
3552 flush_write_bio);
Chris Masonffbd5172009-04-20 15:50:09 -04003553 flush_epd_write_bio(&epd);
Chris Masond1310b22008-01-24 16:13:08 -05003554 return ret;
3555}
Chris Masond1310b22008-01-24 16:13:08 -05003556
3557int extent_readpages(struct extent_io_tree *tree,
3558 struct address_space *mapping,
3559 struct list_head *pages, unsigned nr_pages,
3560 get_extent_t get_extent)
3561{
3562 struct bio *bio = NULL;
3563 unsigned page_idx;
Chris Masonc8b97812008-10-29 14:49:59 -04003564 unsigned long bio_flags = 0;
Liu Bo67c96842012-07-20 21:43:09 -06003565 struct page *pagepool[16];
3566 struct page *page;
3567 int i = 0;
3568 int nr = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003569
Chris Masond1310b22008-01-24 16:13:08 -05003570 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
Liu Bo67c96842012-07-20 21:43:09 -06003571 page = list_entry(pages->prev, struct page, lru);
Chris Masond1310b22008-01-24 16:13:08 -05003572
3573 prefetchw(&page->flags);
3574 list_del(&page->lru);
Liu Bo67c96842012-07-20 21:43:09 -06003575 if (add_to_page_cache_lru(page, mapping,
Itaru Kitayama43e817a2011-04-25 19:43:51 -04003576 page->index, GFP_NOFS)) {
Liu Bo67c96842012-07-20 21:43:09 -06003577 page_cache_release(page);
3578 continue;
Chris Masond1310b22008-01-24 16:13:08 -05003579 }
Liu Bo67c96842012-07-20 21:43:09 -06003580
3581 pagepool[nr++] = page;
3582 if (nr < ARRAY_SIZE(pagepool))
3583 continue;
3584 for (i = 0; i < nr; i++) {
3585 __extent_read_full_page(tree, pagepool[i], get_extent,
3586 &bio, 0, &bio_flags);
3587 page_cache_release(pagepool[i]);
3588 }
3589 nr = 0;
Chris Masond1310b22008-01-24 16:13:08 -05003590 }
Liu Bo67c96842012-07-20 21:43:09 -06003591 for (i = 0; i < nr; i++) {
3592 __extent_read_full_page(tree, pagepool[i], get_extent,
3593 &bio, 0, &bio_flags);
3594 page_cache_release(pagepool[i]);
3595 }
3596
Chris Masond1310b22008-01-24 16:13:08 -05003597 BUG_ON(!list_empty(pages));
3598 if (bio)
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003599 return submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05003600 return 0;
3601}
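/*
 * Illustrative sketch (assumed caller): a ->readpages hook that hands
 * the whole readahead batch to extent_readpages(), which then feeds the
 * pages through __extent_read_full_page() sixteen at a time.
 */
static int example_readpages(struct file *file,
			     struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}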
Chris Masond1310b22008-01-24 16:13:08 -05003602
3603/*
3604 * basic invalidatepage code, this waits on any locked or writeback
3605 * ranges corresponding to the page, and then deletes any extent state
3606 * records from the tree
3607 */
3608int extent_invalidatepage(struct extent_io_tree *tree,
3609 struct page *page, unsigned long offset)
3610{
Josef Bacik2ac55d42010-02-03 19:33:23 +00003611 struct extent_state *cached_state = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003612 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3613 u64 end = start + PAGE_CACHE_SIZE - 1;
3614 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3615
Chris Masond3977122009-01-05 21:25:51 -05003616 start += (offset + blocksize - 1) & ~(blocksize - 1);
Chris Masond1310b22008-01-24 16:13:08 -05003617 if (start > end)
3618 return 0;
3619
Jeff Mahoneyd0082372012-03-01 14:57:19 +01003620 lock_extent_bits(tree, start, end, 0, &cached_state);
Chris Mason1edbb732009-09-02 13:24:36 -04003621 wait_on_page_writeback(page);
Chris Masond1310b22008-01-24 16:13:08 -05003622 clear_extent_bit(tree, start, end,
Josef Bacik32c00af2009-10-08 13:34:05 -04003623 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3624 EXTENT_DO_ACCOUNTING,
Josef Bacik2ac55d42010-02-03 19:33:23 +00003625 1, 1, &cached_state, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05003626 return 0;
3627}
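/*
 * Worked example for the rounding above (assuming a 4K block size):
 * invalidating at offset 0 clears extent state for the whole page,
 * while invalidating at offset 100 rounds start up by 4096, so the
 * block containing the offset keeps its state and only the blocks
 * fully past it are cleared.
 */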
Chris Masond1310b22008-01-24 16:13:08 -05003628
3629/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04003630 * a helper for releasepage, this tests for areas of the page that
3631 * are locked or under IO and drops the related state bits if it is safe
3632 * to drop the page.
3633 */
3634int try_release_extent_state(struct extent_map_tree *map,
3635 struct extent_io_tree *tree, struct page *page,
3636 gfp_t mask)
3637{
3638 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3639 u64 end = start + PAGE_CACHE_SIZE - 1;
3640 int ret = 1;
3641
Chris Mason211f90e2008-07-18 11:56:15 -04003642 if (test_range_bit(tree, start, end,
Chris Mason8b62b722009-09-02 16:53:46 -04003643 EXTENT_IOBITS, 0, NULL))
Chris Mason7b13b7b2008-04-18 10:29:50 -04003644 ret = 0;
3645 else {
3646 if ((mask & GFP_NOFS) == GFP_NOFS)
3647 mask = GFP_NOFS;
Chris Mason11ef1602009-09-23 20:28:46 -04003648 /*
3649 * at this point we can safely clear everything except the
3650 * locked bit and the nodatasum bit
3651 */
Chris Masone3f24cc2011-02-14 12:52:08 -05003652 ret = clear_extent_bit(tree, start, end,
Chris Mason11ef1602009-09-23 20:28:46 -04003653 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3654 0, 0, NULL, mask);
Chris Masone3f24cc2011-02-14 12:52:08 -05003655
3656 /* if clear_extent_bit failed for -ENOMEM reasons,
3657 * we can't allow the release to continue.
3658 */
3659 if (ret < 0)
3660 ret = 0;
3661 else
3662 ret = 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04003663 }
3664 return ret;
3665}
Chris Mason7b13b7b2008-04-18 10:29:50 -04003666
3667/*
Chris Masond1310b22008-01-24 16:13:08 -05003668 * a helper for releasepage. As long as there are no locked extents
3669 * in the range corresponding to the page, both state records and extent
3670 * map records are removed
3671 */
3672int try_release_extent_mapping(struct extent_map_tree *map,
Chris Mason70dec802008-01-29 09:59:12 -05003673 struct extent_io_tree *tree, struct page *page,
3674 gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05003675{
3676 struct extent_map *em;
3677 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3678 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04003679
Chris Mason70dec802008-01-29 09:59:12 -05003680 if ((mask & __GFP_WAIT) &&
3681 page->mapping->host->i_size > 16 * 1024 * 1024) {
Yan39b56372008-02-15 10:40:50 -05003682 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05003683 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05003684 len = end - start + 1;
Chris Mason890871b2009-09-02 16:24:52 -04003685 write_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05003686 em = lookup_extent_mapping(map, start, len);
Tsutomu Itoh285190d2012-02-16 16:23:58 +09003687 if (!em) {
Chris Mason890871b2009-09-02 16:24:52 -04003688 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05003689 break;
3690 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04003691 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3692 em->start != start) {
Chris Mason890871b2009-09-02 16:24:52 -04003693 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05003694 free_extent_map(em);
3695 break;
3696 }
3697 if (!test_range_bit(tree, em->start,
3698 extent_map_end(em) - 1,
Chris Mason8b62b722009-09-02 16:53:46 -04003699 EXTENT_LOCKED | EXTENT_WRITEBACK,
Chris Mason9655d292009-09-02 15:22:30 -04003700 0, NULL)) {
Chris Mason70dec802008-01-29 09:59:12 -05003701 remove_extent_mapping(map, em);
3702 /* once for the rb tree */
3703 free_extent_map(em);
3704 }
3705 start = extent_map_end(em);
Chris Mason890871b2009-09-02 16:24:52 -04003706 write_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05003707
3708 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05003709 free_extent_map(em);
3710 }
Chris Masond1310b22008-01-24 16:13:08 -05003711 }
Chris Mason7b13b7b2008-04-18 10:29:50 -04003712 return try_release_extent_state(map, tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05003713}
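/*
 * Illustrative sketch (assumed caller): a ->releasepage hook on top of
 * try_release_extent_mapping(). The map/tree pair comes from the
 * owning inode, as it does in btrfs_releasepage().
 */
static int example_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct inode *inode = page->mapping->host;

	return try_release_extent_mapping(&BTRFS_I(inode)->extent_tree,
					  &BTRFS_I(inode)->io_tree,
					  page, gfp_flags);
}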
Chris Masond1310b22008-01-24 16:13:08 -05003714
Chris Masonec29ed52011-02-23 16:23:20 -05003715/*
3716 * helper function for fiemap, which doesn't want to see any holes.
3717 * This maps until we find something past 'last'
3718 */
3719static struct extent_map *get_extent_skip_holes(struct inode *inode,
3720 u64 offset,
3721 u64 last,
3722 get_extent_t *get_extent)
3723{
3724 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3725 struct extent_map *em;
3726 u64 len;
3727
3728 if (offset >= last)
3729 return NULL;
3730
3731 while (1) {
3732 len = last - offset;
3733 if (len == 0)
3734 break;
3735 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3736 em = get_extent(inode, NULL, 0, offset, len, 0);
David Sterbac7040052011-04-19 18:00:01 +02003737 if (IS_ERR_OR_NULL(em))
Chris Masonec29ed52011-02-23 16:23:20 -05003738 return em;
3739
3740 /* if this isn't a hole return it */
3741 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3742 em->block_start != EXTENT_MAP_HOLE) {
3743 return em;
3744 }
3745
3746 /* this is a hole, advance to the next extent */
3747 offset = extent_map_end(em);
3748 free_extent_map(em);
3749 if (offset >= last)
3750 break;
3751 }
3752 return NULL;
3753}
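/*
 * Worked example (illustrative layout): with an extent at [0, 64K), a
 * hole at [64K, 128K) and another extent at [128K, 192K), calling this
 * with offset == 64K and last == 192K maps the hole, frees that
 * mapping, advances offset to 128K and returns the second extent, so
 * fiemap below never has to look at the hole itself.
 */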
3754
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003755int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3756 __u64 start, __u64 len, get_extent_t *get_extent)
3757{
Josef Bacik975f84f2010-11-23 19:36:57 +00003758 int ret = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003759 u64 off = start;
3760 u64 max = start + len;
3761 u32 flags = 0;
Josef Bacik975f84f2010-11-23 19:36:57 +00003762 u32 found_type;
3763 u64 last;
Chris Masonec29ed52011-02-23 16:23:20 -05003764 u64 last_for_get_extent = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003765 u64 disko = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05003766 u64 isize = i_size_read(inode);
Josef Bacik975f84f2010-11-23 19:36:57 +00003767 struct btrfs_key found_key;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003768 struct extent_map *em = NULL;
Josef Bacik2ac55d42010-02-03 19:33:23 +00003769 struct extent_state *cached_state = NULL;
Josef Bacik975f84f2010-11-23 19:36:57 +00003770 struct btrfs_path *path;
3771 struct btrfs_file_extent_item *item;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003772 int end = 0;
Chris Masonec29ed52011-02-23 16:23:20 -05003773 u64 em_start = 0;
3774 u64 em_len = 0;
3775 u64 em_end = 0;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003776 unsigned long emflags;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003777
3778 if (len == 0)
3779 return -EINVAL;
3780
Josef Bacik975f84f2010-11-23 19:36:57 +00003781 path = btrfs_alloc_path();
3782 if (!path)
3783 return -ENOMEM;
3784 path->leave_spinning = 1;
3785
Josef Bacik4d479cf2011-11-17 11:34:31 -05003786 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3787 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3788
Chris Masonec29ed52011-02-23 16:23:20 -05003789 /*
3790 * lookup the last file extent. We're not using i_size here
3791 * because there might be preallocation past i_size
3792 */
Josef Bacik975f84f2010-11-23 19:36:57 +00003793 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
Li Zefan33345d012011-04-20 10:31:50 +08003794 path, btrfs_ino(inode), -1, 0);
Josef Bacik975f84f2010-11-23 19:36:57 +00003795 if (ret < 0) {
3796 btrfs_free_path(path);
3797 return ret;
3798 }
3799 WARN_ON(!ret);
3800 path->slots[0]--;
3801 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3802 struct btrfs_file_extent_item);
3803 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3804 found_type = btrfs_key_type(&found_key);
3805
Chris Masonec29ed52011-02-23 16:23:20 -05003806 /* No extents, but there might be delalloc bits */
Li Zefan33345d012011-04-20 10:31:50 +08003807 if (found_key.objectid != btrfs_ino(inode) ||
Josef Bacik975f84f2010-11-23 19:36:57 +00003808 found_type != BTRFS_EXTENT_DATA_KEY) {
Chris Masonec29ed52011-02-23 16:23:20 -05003809 /* have to trust i_size as the end */
3810 last = (u64)-1;
3811 last_for_get_extent = isize;
3812 } else {
3813 /*
3814 * remember the start of the last extent. There are a
3815 * bunch of different factors that go into the length of the
3816 * extent, so its much less complex to remember where it started
3817 */
3818 last = found_key.offset;
3819 last_for_get_extent = last + 1;
Josef Bacik975f84f2010-11-23 19:36:57 +00003820 }
Josef Bacik975f84f2010-11-23 19:36:57 +00003821 btrfs_free_path(path);
3822
Chris Masonec29ed52011-02-23 16:23:20 -05003823 /*
3824 * we might have some extents allocated but more delalloc past those
3825 * extents. so, we trust isize unless the start of the last extent is
3826 * beyond isize
3827 */
3828 if (last < isize) {
3829 last = (u64)-1;
3830 last_for_get_extent = isize;
3831 }
3832
Josef Bacik2ac55d42010-02-03 19:33:23 +00003833 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
Jeff Mahoneyd0082372012-03-01 14:57:19 +01003834 &cached_state);
Chris Masonec29ed52011-02-23 16:23:20 -05003835
Josef Bacik4d479cf2011-11-17 11:34:31 -05003836 em = get_extent_skip_holes(inode, start, last_for_get_extent,
Chris Masonec29ed52011-02-23 16:23:20 -05003837 get_extent);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003838 if (!em)
3839 goto out;
3840 if (IS_ERR(em)) {
3841 ret = PTR_ERR(em);
3842 goto out;
3843 }
Josef Bacik975f84f2010-11-23 19:36:57 +00003844
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003845 while (!end) {
Chris Masonea8efc72011-03-08 11:54:40 -05003846 u64 offset_in_extent;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003847
Chris Masonea8efc72011-03-08 11:54:40 -05003848 /* break if the extent we found is outside the range */
3849 if (em->start >= max || extent_map_end(em) < off)
3850 break;
3851
3852 /*
3853 * get_extent may return an extent that starts before our
3854 * requested range. We have to make sure the ranges
3855 * we return to fiemap always move forward and don't
3856 * overlap, so adjust the offsets here
3857 */
3858 em_start = max(em->start, off);
3859
3860 /*
3861 * record the offset from the start of the extent
3862 * for adjusting the disk offset below
3863 */
3864 offset_in_extent = em_start - em->start;
Chris Masonec29ed52011-02-23 16:23:20 -05003865 em_end = extent_map_end(em);
Chris Masonea8efc72011-03-08 11:54:40 -05003866 em_len = em_end - em_start;
Chris Masonec29ed52011-02-23 16:23:20 -05003867 emflags = em->flags;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003868 disko = 0;
3869 flags = 0;
3870
Chris Masonea8efc72011-03-08 11:54:40 -05003871 /*
3872 * bump off for our next call to get_extent
3873 */
3874 off = extent_map_end(em);
3875 if (off >= max)
3876 end = 1;
3877
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003878 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003879 end = 1;
3880 flags |= FIEMAP_EXTENT_LAST;
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003881 } else if (em->block_start == EXTENT_MAP_INLINE) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003882 flags |= (FIEMAP_EXTENT_DATA_INLINE |
3883 FIEMAP_EXTENT_NOT_ALIGNED);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003884 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003885 flags |= (FIEMAP_EXTENT_DELALLOC |
3886 FIEMAP_EXTENT_UNKNOWN);
Heiko Carstens93dbfad2009-04-03 10:33:45 -04003887 } else {
Chris Masonea8efc72011-03-08 11:54:40 -05003888 disko = em->block_start + offset_in_extent;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003889 }
3890 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3891 flags |= FIEMAP_EXTENT_ENCODED;
3892
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003893 free_extent_map(em);
3894 em = NULL;
Chris Masonec29ed52011-02-23 16:23:20 -05003895 if ((em_start >= last) || em_len == (u64)-1 ||
3896 (last == (u64)-1 && isize <= em_end)) {
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003897 flags |= FIEMAP_EXTENT_LAST;
3898 end = 1;
3899 }
3900
Chris Masonec29ed52011-02-23 16:23:20 -05003901 /* now scan forward to see if this is really the last extent. */
3902 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3903 get_extent);
3904 if (IS_ERR(em)) {
3905 ret = PTR_ERR(em);
3906 goto out;
3907 }
3908 if (!em) {
Josef Bacik975f84f2010-11-23 19:36:57 +00003909 flags |= FIEMAP_EXTENT_LAST;
3910 end = 1;
3911 }
Chris Masonec29ed52011-02-23 16:23:20 -05003912 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3913 em_len, flags);
3914 if (ret)
3915 goto out_free;
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003916 }
3917out_free:
3918 free_extent_map(em);
3919out:
Josef Bacik2ac55d42010-02-03 19:33:23 +00003920 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3921 &cached_state, GFP_NOFS);
Yehuda Sadeh1506fcc2009-01-21 14:39:14 -05003922 return ret;
3923}
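/*
 * Illustrative sketch (assumed wiring): the inode operation ->fiemap
 * can forward here; btrfs does so with a fiemap-flavoured get_extent
 * callback, for which btrfs_get_extent is used below as a stand-in.
 */
static int example_fiemap(struct inode *inode,
			  struct fiemap_extent_info *fieinfo,
			  u64 start, u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
}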
3924
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02003925inline struct page *extent_buffer_page(struct extent_buffer *eb,
Chris Masond1310b22008-01-24 16:13:08 -05003926 unsigned long i)
3927{
Chris Mason727011e2010-08-06 13:21:20 -04003928 return eb->pages[i];
Chris Masond1310b22008-01-24 16:13:08 -05003929}
3930
Jan Schmidt4a54c8c2011-07-22 15:41:52 +02003931inline unsigned long num_extent_pages(u64 start, u64 len)
Chris Masonce9adaa2008-04-09 16:28:12 -04003932{
Chris Mason6af118ce2008-07-22 11:18:07 -04003933 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3934 (start >> PAGE_CACHE_SHIFT);
Chris Mason728131d2008-04-09 16:28:12 -04003935}
3936
Chris Mason727011e2010-08-06 13:21:20 -04003937static void __free_extent_buffer(struct extent_buffer *eb)
3938{
3939#if LEAK_DEBUG
3940 unsigned long flags;
3941 spin_lock_irqsave(&leak_lock, flags);
3942 list_del(&eb->leak_list);
3943 spin_unlock_irqrestore(&leak_lock, flags);
3944#endif
3945 if (eb->pages && eb->pages != eb->inline_pages)
3946 kfree(eb->pages);
3947 kmem_cache_free(extent_buffer_cache, eb);
3948}
3949
Chris Masond1310b22008-01-24 16:13:08 -05003950static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3951 u64 start,
3952 unsigned long len,
3953 gfp_t mask)
3954{
3955 struct extent_buffer *eb = NULL;
Chris Mason39351272009-02-04 09:24:05 -05003956#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003957 unsigned long flags;
Chris Mason4bef0842008-09-08 11:18:08 -04003958#endif
Chris Masond1310b22008-01-24 16:13:08 -05003959
Chris Masond1310b22008-01-24 16:13:08 -05003960 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
Tsutomu Itoh91ca3382011-01-05 02:32:22 +00003961 if (eb == NULL)
3962 return NULL;
Chris Masond1310b22008-01-24 16:13:08 -05003963 eb->start = start;
3964 eb->len = len;
Josef Bacik4f2de97a2012-03-07 16:20:05 -05003965 eb->tree = tree;
Jan Schmidt815a51c2012-05-16 17:00:02 +02003966 eb->bflags = 0;
Chris Masonbd681512011-07-16 15:23:14 -04003967 rwlock_init(&eb->lock);
3968 atomic_set(&eb->write_locks, 0);
3969 atomic_set(&eb->read_locks, 0);
3970 atomic_set(&eb->blocking_readers, 0);
3971 atomic_set(&eb->blocking_writers, 0);
3972 atomic_set(&eb->spinning_readers, 0);
3973 atomic_set(&eb->spinning_writers, 0);
Arne Jansen5b25f702011-09-13 10:55:48 +02003974 eb->lock_nested = 0;
Chris Masonbd681512011-07-16 15:23:14 -04003975 init_waitqueue_head(&eb->write_lock_wq);
3976 init_waitqueue_head(&eb->read_lock_wq);
Chris Masonb4ce94d2009-02-04 09:25:08 -05003977
Chris Mason39351272009-02-04 09:24:05 -05003978#if LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04003979 spin_lock_irqsave(&leak_lock, flags);
3980 list_add(&eb->leak_list, &buffers);
3981 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04003982#endif
Josef Bacik3083ee22012-03-09 16:01:49 -05003983 spin_lock_init(&eb->refs_lock);
Chris Masond1310b22008-01-24 16:13:08 -05003984 atomic_set(&eb->refs, 1);
Josef Bacik0b32f4b2012-03-13 09:38:00 -04003985 atomic_set(&eb->io_pages, 0);
Chris Mason727011e2010-08-06 13:21:20 -04003986
3987 if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3988 struct page **pages;
3989 int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
3990 PAGE_CACHE_SHIFT;
3991 pages = kzalloc(num_pages * sizeof(struct page *), mask);
3992 if (!pages) {
3993 __free_extent_buffer(eb);
3994 return NULL;
3995 }
3996 eb->pages = pages;
3997 } else {
3998 eb->pages = eb->inline_pages;
3999 }
Chris Masond1310b22008-01-24 16:13:08 -05004000
4001 return eb;
4002}
4003
Jan Schmidt815a51c2012-05-16 17:00:02 +02004004struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4005{
4006 unsigned long i;
4007 struct page *p;
4008 struct extent_buffer *new;
4009 unsigned long num_pages = num_extent_pages(src->start, src->len);
4010
4011 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
4012 if (new == NULL)
4013 return NULL;
4014
4015 for (i = 0; i < num_pages; i++) {
4016 p = alloc_page(GFP_ATOMIC);
4017 BUG_ON(!p);
4018 attach_extent_buffer_page(new, p);
4019 WARN_ON(PageDirty(p));
4020 SetPageUptodate(p);
4021 new->pages[i] = p;
4022 }
4023
4024 copy_extent_buffer(new, src, 0, 0, src->len);
4025 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4026 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4027
4028 return new;
4029}
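/*
 * Illustrative use (assumed context): snapshot a live eb so it can be
 * inspected without holding its lock, the way backref walking uses
 * clones. The copy is a DUMMY buffer and is dropped with
 * free_extent_buffer() like any other eb.
 */
static void example_inspect_eb(struct extent_buffer *eb)
{
	struct extent_buffer *copy = btrfs_clone_extent_buffer(eb);

	if (!copy)
		return;
	/* read-only inspection of copy goes here */
	free_extent_buffer(copy);
}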
4030
4031struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4032{
4033 struct extent_buffer *eb;
4034 unsigned long num_pages = num_extent_pages(0, len);
4035 unsigned long i;
4036
4037 eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
4038 if (!eb)
4039 return NULL;
4040
4041 for (i = 0; i < num_pages; i++) {
4042 eb->pages[i] = alloc_page(GFP_ATOMIC);
4043 if (!eb->pages[i])
4044 goto err;
4045 }
4046 set_extent_buffer_uptodate(eb);
4047 btrfs_set_header_nritems(eb, 0);
4048 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4049
4050 return eb;
4051err:
4052 for (; i > 0; i--)
4053 __free_page(eb->pages[i - 1]);
4054 __free_extent_buffer(eb);
4055 return NULL;
4056}
4057
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004058static int extent_buffer_under_io(struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05004059{
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004060 return (atomic_read(&eb->io_pages) ||
4061 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4062 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Chris Masond1310b22008-01-24 16:13:08 -05004063}
4064
Miao Xie897ca6e92010-10-26 20:57:29 -04004065/*
4066 * Helper for releasing an extent buffer's pages.
4067 */
4068static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4069 unsigned long start_idx)
4070{
4071 unsigned long index;
Wang Sheng-Hui39bab872012-04-06 14:35:31 +08004072 unsigned long num_pages;
Miao Xie897ca6e92010-10-26 20:57:29 -04004073 struct page *page;
Jan Schmidt815a51c2012-05-16 17:00:02 +02004074 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
Miao Xie897ca6e92010-10-26 20:57:29 -04004075
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004076 BUG_ON(extent_buffer_under_io(eb));
Miao Xie897ca6e92010-10-26 20:57:29 -04004077
Wang Sheng-Hui39bab872012-04-06 14:35:31 +08004078 num_pages = num_extent_pages(eb->start, eb->len);
4079 index = start_idx + num_pages;
Miao Xie897ca6e92010-10-26 20:57:29 -04004080 if (start_idx >= index)
4081 return;
4082
4083 do {
4084 index--;
4085 page = extent_buffer_page(eb, index);
Jan Schmidt815a51c2012-05-16 17:00:02 +02004086 if (page && mapped) {
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004087 spin_lock(&page->mapping->private_lock);
4088 /*
4089 * We do this since we'll remove the pages after we've
4090 * removed the eb from the radix tree, so we could race
4091 * and have this page now attached to the new eb. So
4092 * only clear page_private if it's still connected to
4093 * this eb.
4094 */
4095 if (PagePrivate(page) &&
4096 page->private == (unsigned long)eb) {
Josef Bacik0b32f4b2012-03-13 09:38:00 -04004097 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
Josef Bacik3083ee22012-03-09 16:01:49 -05004098 BUG_ON(PageDirty(page));
4099 BUG_ON(PageWriteback(page));
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004100 /*
4101 * We need to make sure we haven't been attached
4102 * to a new eb.
4103 */
4104 ClearPagePrivate(page);
4105 set_page_private(page, 0);
4106 /* One for the page private */
4107 page_cache_release(page);
4108 }
4109 spin_unlock(&page->mapping->private_lock);
4110
Jan Schmidt815a51c2012-05-16 17:00:02 +02004111 }
4112 if (page) {
Josef Bacik4f2de97a2012-03-07 16:20:05 -05004113 /* One for when we alloced the page */
			page_cache_release(page);
		}
	} while (index != start_idx);
}

/*
 * Helper for releasing the extent buffer.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
	btrfs_release_extent_buffer_page(eb, 0);
	__free_extent_buffer(eb);
}

static void check_buffer_tree_ref(struct extent_buffer *eb)
{
	/*
	 * The ref bit is tricky.  We have to make sure it is set
	 * if we have the buffer dirty.  Otherwise the
	 * code to free a buffer can end up dropping a dirty
	 * page.
	 *
	 * Once the ref bit is set, it won't go away while the
	 * buffer is dirty or in writeback, and it also won't
	 * go away while we have the reference count on the
	 * eb bumped.
	 *
	 * We can't just set the ref bit without bumping the
	 * ref on the eb because free_extent_buffer might
	 * see the ref bit and try to clear it.  If this happens
	 * free_extent_buffer might end up dropping our original
	 * ref by mistake and freeing the page before we are able
	 * to add one more ref.
	 *
	 * So bump the ref count first, then set the bit.  If someone
	 * beat us to it, drop the ref we added.
	 */
	spin_lock(&eb->refs_lock);
	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_inc(&eb->refs);
	spin_unlock(&eb->refs_lock);
}
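
/*
 * Illustrative sketch (added for exposition; not part of the original
 * file): set_extent_buffer_dirty() below is the canonical caller.  The
 * tree ref is pinned before any page is dirtied, so a racing
 * free_extent_buffer() can never drop the last ref and free pages that
 * are about to become dirty:
 *
 *	check_buffer_tree_ref(eb);
 *	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
 *	for (i = 0; i < num_pages; i++)
 *		set_page_dirty(extent_buffer_page(eb, i));
 */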

static void mark_extent_buffer_accessed(struct extent_buffer *eb)
{
	unsigned long num_pages, i;

	check_buffer_tree_ref(eb);

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *p = extent_buffer_page(eb, i);
		mark_page_accessed(p);
	}
}

struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
					  u64 start, unsigned long len)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct extent_buffer *exists = NULL;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;
	int ret;

	rcu_read_lock();
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		mark_extent_buffer_accessed(eb);
		return eb;
	}
	rcu_read_unlock();

	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
	if (!eb)
		return NULL;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, GFP_NOFS);
		if (!p) {
			WARN_ON(1);
			goto free_eb;
		}

		spin_lock(&mapping->private_lock);
		if (PagePrivate(p)) {
			/*
			 * We could have already allocated an eb for this page
			 * and attached one, so let's see if we can get a ref
			 * on the existing eb.  If we can, we know it's good
			 * and we can just return that one; otherwise we know
			 * we can safely overwrite page->private.
			 */
			exists = (struct extent_buffer *)p->private;
			if (atomic_inc_not_zero(&exists->refs)) {
				spin_unlock(&mapping->private_lock);
				unlock_page(p);
				page_cache_release(p);
				mark_extent_buffer_accessed(exists);
				goto free_eb;
			}

			/*
			 * Clear page private here so that attaching below
			 * doesn't complain, and drop the ref the old eb had
			 * on the page.
			 */
			ClearPagePrivate(p);
			WARN_ON(PageDirty(p));
			page_cache_release(p);
		}
		attach_extent_buffer_page(eb, p);
		spin_unlock(&mapping->private_lock);
		WARN_ON(PageDirty(p));
		mark_page_accessed(p);
		eb->pages[i] = p;
		if (!PageUptodate(p))
			uptodate = 0;

		/*
		 * See below about how we avoid a nasty race with release page
		 * and why we unlock later.
		 */
	}
	if (uptodate)
		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
again:
	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		goto free_eb;

	spin_lock(&tree->buffer_lock);
	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
	if (ret == -EEXIST) {
		exists = radix_tree_lookup(&tree->buffer,
						start >> PAGE_CACHE_SHIFT);
		if (!atomic_inc_not_zero(&exists->refs)) {
			spin_unlock(&tree->buffer_lock);
			radix_tree_preload_end();
			exists = NULL;
			goto again;
		}
		spin_unlock(&tree->buffer_lock);
		radix_tree_preload_end();
		mark_extent_buffer_accessed(exists);
		goto free_eb;
	}
	/* add one reference for the tree */
	check_buffer_tree_ref(eb);
	spin_unlock(&tree->buffer_lock);
	radix_tree_preload_end();

	/*
	 * There is a race where release page may have tried to find this
	 * extent buffer in the radix tree but failed.  It will tell the VM
	 * it is safe to reclaim the page, and it will clear the page private
	 * bit.  We must make sure to set the page private bit properly after
	 * the extent buffer is in the radix tree so it doesn't get lost.
	 */
	SetPageChecked(eb->pages[0]);
	for (i = 1; i < num_pages; i++) {
		p = extent_buffer_page(eb, i);
		ClearPageChecked(p);
		unlock_page(p);
	}
	unlock_page(eb->pages[0]);
	return eb;

free_eb:
	for (i = 0; i < num_pages; i++) {
		if (eb->pages[i])
			unlock_page(eb->pages[i]);
	}

	WARN_ON(!atomic_dec_and_test(&eb->refs));
	btrfs_release_extent_buffer(eb);
	return exists;
}
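
/*
 * Illustrative usage sketch (an assumed caller shape, not verbatim from
 * this file): metadata readers do lookup-or-create via
 * alloc_extent_buffer() and drop their ref with free_extent_buffer()
 * when done; "my_get_extent" is a placeholder for whatever get_extent_t
 * callback the caller provides:
 *
 *	eb = alloc_extent_buffer(tree, start, len);
 *	if (!eb)
 *		return -ENOMEM;
 *	ret = read_extent_buffer_pages(tree, eb, 0, WAIT_COMPLETE,
 *				       my_get_extent, mirror_num);
 *	...
 *	free_extent_buffer(eb);
 */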

struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 start, unsigned long len)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
	if (eb && atomic_inc_not_zero(&eb->refs)) {
		rcu_read_unlock();
		mark_extent_buffer_accessed(eb);
		return eb;
	}
	rcu_read_unlock();

	return NULL;
}

static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
	struct extent_buffer *eb =
			container_of(head, struct extent_buffer, rcu_head);

	__free_extent_buffer(eb);
}

/* Expects to have eb->refs_lock already held */
static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
{
	WARN_ON(atomic_read(&eb->refs) == 0);
	if (atomic_dec_and_test(&eb->refs)) {
		if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
			spin_unlock(&eb->refs_lock);
		} else {
			struct extent_io_tree *tree = eb->tree;

			spin_unlock(&eb->refs_lock);

			spin_lock(&tree->buffer_lock);
			radix_tree_delete(&tree->buffer,
					  eb->start >> PAGE_CACHE_SHIFT);
			spin_unlock(&tree->buffer_lock);
		}

		/* Should be safe to release our pages at this point */
		btrfs_release_extent_buffer_page(eb, 0);
		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
		return 1;
	}
	spin_unlock(&eb->refs_lock);

	return 0;
}

void free_extent_buffer(struct extent_buffer *eb)
{
	if (!eb)
		return;

	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
		atomic_dec(&eb->refs);

	if (atomic_read(&eb->refs) == 2 &&
	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
	    !extent_buffer_under_io(eb) &&
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);

	/*
	 * I know this is terrible, but it's temporary until we stop tracking
	 * the uptodate bits and such for the extent buffers.
	 */
	release_extent_buffer(eb, GFP_ATOMIC);
}

void free_extent_buffer_stale(struct extent_buffer *eb)
{
	if (!eb)
		return;

	spin_lock(&eb->refs_lock);
	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);

	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
		atomic_dec(&eb->refs);
	release_extent_buffer(eb, GFP_NOFS);
}
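
/*
 * Lifetime sketch (exposition only; counts assume no other concurrent
 * holders): a cached eb normally carries two refs, the tree ref taken
 * by check_buffer_tree_ref() and the current user's ref:
 *
 *	eb = find_extent_buffer(tree, start, len);	// refs == 2
 *	...
 *	free_extent_buffer(eb);		// refs == 1, eb stays cached
 *
 * free_extent_buffer_stale() additionally clears EXTENT_BUFFER_TREE_REF
 * and drops that ref, so the buffer is destroyed as soon as the last
 * user lets go.
 */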

void clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageDirty(page))
			continue;

		lock_page(page);
		WARN_ON(!PagePrivate(page));

		clear_page_dirty_for_io(page);
		spin_lock_irq(&page->mapping->tree_lock);
		if (!PageDirty(page)) {
			radix_tree_tag_clear(&page->mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&page->mapping->tree_lock);
		ClearPageError(page);
		unlock_page(page);
	}
	WARN_ON(atomic_read(&eb->refs) == 0);
}

int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	int was_dirty = 0;

	check_buffer_tree_ref(eb);

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);

	num_pages = num_extent_pages(eb->start, eb->len);
	WARN_ON(atomic_read(&eb->refs) == 0);
	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));

	for (i = 0; i < num_pages; i++)
		set_page_dirty(extent_buffer_page(eb, i));
	return was_dirty;
}

static int range_straddles_pages(u64 start, u64 len)
{
	if (len < PAGE_CACHE_SIZE)
		return 1;
	if (start & (PAGE_CACHE_SIZE - 1))
		return 1;
	if ((start + len) & (PAGE_CACHE_SIZE - 1))
		return 1;
	return 0;
}
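
/*
 * Worked example (illustrative, 4K pages): start=8192 len=8192 is page
 * aligned at both ends, so range_straddles_pages() returns 0; start=8192
 * len=6144 ends mid-page ((start + len) & 4095 == 2048), so it returns 1
 * and extent_range_uptodate() below also consults the EXTENT_UPTODATE
 * bits in the tree rather than relying on page flags alone.
 */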

int clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (page)
			ClearPageUptodate(page);
	}
	return 0;
}

int set_extent_buffer_uptodate(struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		SetPageUptodate(page);
	}
	return 0;
}

int extent_range_uptodate(struct extent_io_tree *tree,
			  u64 start, u64 end)
{
	struct page *page;
	int ret;
	int pg_uptodate = 1;
	int uptodate;
	unsigned long index;

	if (range_straddles_pages(start, end - start + 1)) {
		ret = test_range_bit(tree, start, end,
				     EXTENT_UPTODATE, 1, NULL);
		if (ret)
			return 1;
	}
	while (start <= end) {
		index = start >> PAGE_CACHE_SHIFT;
		page = find_get_page(tree->mapping, index);
		if (!page)
			return 1;
		uptodate = PageUptodate(page);
		page_cache_release(page);
		if (!uptodate) {
			pg_uptodate = 0;
			break;
		}
		start += PAGE_CACHE_SIZE;
	}
	return pg_uptodate;
}

int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	unsigned long num_pages;
	unsigned long num_reads = 0;
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 0;

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (wait == WAIT_NONE) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page)) {
			num_reads++;
			all_uptodate = 0;
		}
	}
	if (all_uptodate) {
		if (start_i == 0)
			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
		goto unlock_exit;
	}

	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	eb->read_mirror = 0;
	atomic_set(&eb->io_pages, num_reads);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num, &bio_flags);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}

	if (bio) {
		err = submit_one_bio(READ, bio, mirror_num, bio_flags);
		if (err)
			return err;
	}

	if (ret || wait != WAIT_COMPLETE)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}

	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}
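
/*
 * Illustrative sketch (assumed caller shape, not verbatim): WAIT_COMPLETE
 * waits for every page and turns any non-uptodate page into -EIO, while
 * WAIT_NONE only starts reads on pages it can trylock; "my_get_extent"
 * is a placeholder callback:
 *
 *	ret = read_extent_buffer_pages(tree, eb, 0, WAIT_COMPLETE,
 *				       my_get_extent, mirror_num);
 *	if (!ret && extent_buffer_uptodate(eb))
 *		... contents may now be copied with read_extent_buffer() ...
 */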

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = page_address(page);
		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}

	if (start + min_len > eb->len) {
		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
		       "wanted %lu %lu\n", (unsigned long long)eb->start,
		       eb->len, start, min_len);
		WARN_ON(1);
		return -EINVAL;
	}

	p = extent_buffer_page(eb, i);
	kaddr = page_address(p);
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
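
/*
 * Illustrative usage sketch (the pattern assumed of callers, not copied
 * from one, and ignoring the endianness/alignment handling real callers
 * do): mapping succeeds only when [start, start + min_len) fits within
 * one page, so callers keep a copying fallback for the -EINVAL case:
 *
 *	err = map_private_extent_buffer(eb, start, sizeof(u64),
 *					&kaddr, &map_start, &map_len);
 *	if (!err)
 *		val = *(u64 *)(kaddr + start - map_start);
 *	else
 *		read_extent_buffer(eb, &val, start, sizeof(u64));
 */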

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
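
/*
 * Round-trip sketch (exposition only): the copy helpers hide page
 * boundaries, so callers can treat the eb as flat storage; both helpers
 * walk pages in min(len, PAGE_CACHE_SIZE - offset) chunks:
 *
 *	struct btrfs_disk_key key_buf;
 *
 *	read_extent_buffer(eb, &key_buf, offset, sizeof(key_buf));
 *	... modify key_buf ...
 *	write_extent_buffer(eb, &key_buf, offset, sizeof(key_buf));
 */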

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = page_address(page);
		memset(kaddr + offset, c, cur);

		len -= cur;
		offset = 0;
		i++;
	}
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = page_address(page);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = page_address(src_page);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;
	}
}

static inline bool areas_overlap(unsigned long src, unsigned long dst,
				 unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}
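
/*
 * Worked example (illustrative): src_off=100, dst_off=164, len=128 gives
 * distance 64 < 128, so the regions overlap and copy_pages() below must
 * memmove(); with len=64 the distance equals len, the regions are merely
 * adjacent, and memcpy() is safe.
 */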

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;
	int must_memmove = 0;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		if (areas_overlap(src_off, dst_off, len))
			must_memmove = 1;
	}

	if (must_memmove)
		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
	else
		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memcpy bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memcpy bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
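
/*
 * Illustrative sketch (names are placeholders, not from this file):
 * shifting item data inside one btree leaf is the typical overlapping
 * move; when dst > src the helper copies backwards via move_pages() so
 * the source bytes are not clobbered mid-copy:
 *
 *	// shift nbytes of item data up by 'size' bytes within one leaf
 *	memmove_extent_buffer(leaf, data_start + size, data_start, nbytes);
 *
 * Forward (dst < src) moves are delegated to memcpy_extent_buffer(),
 * which is safe for that direction even when the ranges overlap.
 */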

int try_release_extent_buffer(struct page *page, gfp_t mask)
{
	struct extent_buffer *eb;

	/*
	 * We need to make sure nobody is attaching this page to an eb right
	 * now.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!PagePrivate(page)) {
		spin_unlock(&page->mapping->private_lock);
		return 1;
	}

	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);

	/*
	 * This is a little awful but should be OK: we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&page->mapping->private_lock);
		return 0;
	}
	spin_unlock(&page->mapping->private_lock);

	if ((mask & GFP_NOFS) == GFP_NOFS)
		mask = GFP_NOFS;

	/*
	 * If tree ref isn't set then we know the ref on this eb is a real
	 * ref, so just return; this page will likely be freed soon anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb, mask);
}