#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
                                       unsigned long extra_flags,
                                       void (*ctor)(void *, struct kmem_cache *,
                                                    unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 1
#ifdef LEAK_DEBUG
static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
        u64 start;
        u64 end;
        struct rb_node rb_node;
};

struct extent_page_data {
        struct bio *bio;
        struct extent_io_tree *tree;
        get_extent_t *get_extent;
};

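/*
 * create the slab caches used for extent_state and extent_buffer
 * structs.  Called once when the module is loaded.
 */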
int __init extent_io_init(void)
{
        extent_state_cache = btrfs_cache_create("extent_state",
                                            sizeof(struct extent_state), 0,
                                            NULL);
        if (!extent_state_cache)
                return -ENOMEM;

        extent_buffer_cache = btrfs_cache_create("extent_buffers",
                                            sizeof(struct extent_buffer), 0,
                                            NULL);
        if (!extent_buffer_cache)
                goto free_state_cache;
        return 0;

free_state_cache:
        kmem_cache_destroy(extent_state_cache);
        return -ENOMEM;
}

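/*
 * tear the slab caches back down at module exit time, printing a
 * warning for any extent_state or extent_buffer structs that leaked.
 */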
void extent_io_exit(void)
{
        struct extent_state *state;
        struct extent_buffer *eb;

        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, leak_list);
                printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
                list_del(&state->leak_list);
                kmem_cache_free(extent_state_cache, state);

        }

        while (!list_empty(&buffers)) {
                eb = list_entry(buffers.next, struct extent_buffer, leak_list);
                printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
                list_del(&eb->leak_list);
                kmem_cache_free(extent_buffer_cache, eb);
        }
        if (extent_state_cache)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
                kmem_cache_destroy(extent_buffer_cache);
}

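/*
 * initialize an empty extent_io_tree and attach it to the given
 * address_space.
 */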
void extent_io_tree_init(struct extent_io_tree *tree,
                          struct address_space *mapping, gfp_t mask)
{
        tree->state.rb_node = NULL;
        tree->buffer.rb_node = NULL;
        tree->ops = NULL;
        tree->dirty_bytes = 0;
        spin_lock_init(&tree->lock);
        spin_lock_init(&tree->buffer_lock);
        tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_io_tree_init);

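/*
 * allocate a new extent_state struct with a single reference held.
 * Returns NULL if the allocation fails.
 */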
struct extent_state *alloc_extent_state(gfp_t mask)
{
        struct extent_state *state;
#ifdef LEAK_DEBUG
        unsigned long flags;
#endif

        state = kmem_cache_alloc(extent_state_cache, mask);
        if (!state)
                return state;
        state->state = 0;
        state->private = 0;
        state->tree = NULL;
#ifdef LEAK_DEBUG
        spin_lock_irqsave(&leak_lock, flags);
        list_add(&state->leak_list, &states);
        spin_unlock_irqrestore(&leak_lock, flags);
#endif
        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
        return state;
}
EXPORT_SYMBOL(alloc_extent_state);

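/*
 * drop a reference on an extent_state struct, freeing it when the last
 * reference goes away.  The state must already be out of the tree.
 */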
void free_extent_state(struct extent_state *state)
{
        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
#ifdef LEAK_DEBUG
                unsigned long flags;
#endif
                WARN_ON(state->tree);
#ifdef LEAK_DEBUG
                spin_lock_irqsave(&leak_lock, flags);
                list_del(&state->leak_list);
                spin_unlock_irqrestore(&leak_lock, flags);
#endif
                kmem_cache_free(extent_state_cache, state);
        }
}
EXPORT_SYMBOL(free_extent_state);

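/*
 * insert 'node' into the rb tree keyed by 'offset'.  Returns the existing
 * node whose range already covers this offset, or NULL on success.
 */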
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node ** p = &root->rb_node;
        struct rb_node * parent = NULL;
        struct tree_entry *entry;

        while(*p) {
                parent = *p;
                entry = rb_entry(parent, struct tree_entry, rb_node);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset > entry->end)
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct tree_entry, rb_node);
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

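/*
 * search the state tree for an entry that contains 'offset'.  On a miss
 * the neighbouring entries are handed back through *prev_ret and
 * *next_ret (when the caller asks for them) so callers can find the
 * nearest extent to the offset.
 */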
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_root *root = &tree->state;
        struct rb_node * n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct tree_entry *entry;
        struct tree_entry *prev_entry = NULL;

        while(n) {
                entry = rb_entry(n, struct tree_entry, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset > entry->end)
                        n = n->rb_right;
                else {
                        return n;
                }
        }

        if (prev_ret) {
                orig_prev = prev;
                while(prev && offset > prev_entry->end) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                while(prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct tree_entry, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

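/*
 * like __etree_search, but when no entry contains 'offset' this returns
 * the neighbouring entry reported by __etree_search instead of NULL.
 */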
static inline struct rb_node *tree_search(struct extent_io_tree *tree,
                                          u64 offset)
{
        struct rb_node *prev = NULL;
        struct rb_node *ret;

        ret = __etree_search(tree, offset, &prev, NULL);
        if (!ret) {
                return prev;
        }
        return ret;
}

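/*
 * insert an extent_buffer into the per-tree buffer rb tree, keyed by
 * 'offset' (the buffer's start).  Returns an existing buffer at that
 * offset if there is one, or NULL on success.
 */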
static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
                                          u64 offset, struct rb_node *node)
{
        struct rb_root *root = &tree->buffer;
        struct rb_node ** p = &root->rb_node;
        struct rb_node * parent = NULL;
        struct extent_buffer *eb;

        while(*p) {
                parent = *p;
                eb = rb_entry(parent, struct extent_buffer, rb_node);

                if (offset < eb->start)
                        p = &(*p)->rb_left;
                else if (offset > eb->start)
                        p = &(*p)->rb_right;
                else
                        return eb;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

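/*
 * find the extent_buffer that starts at 'offset' in the buffer rb tree,
 * or NULL if none is cached there.
 */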
static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
                                           u64 offset)
{
        struct rb_root *root = &tree->buffer;
        struct rb_node * n = root->rb_node;
        struct extent_buffer *eb;

        while(n) {
                eb = rb_entry(n, struct extent_buffer, rb_node);
                if (offset < eb->start)
                        n = n->rb_left;
                else if (offset > eb->start)
                        n = n->rb_right;
                else
                        return eb;
        }
        return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
                       struct extent_state *state)
{
        struct extent_state *other;
        struct rb_node *other_node;

        if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
                return 0;

        other_node = rb_prev(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->end == state->start - 1 &&
                    other->state == state->state) {
                        state->start = other->start;
                        other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
        }
        other_node = rb_next(&state->rb_node);
        if (other_node) {
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
                        other->start = state->start;
                        state->tree = NULL;
                        rb_erase(&state->rb_node, &tree->state);
                        free_extent_state(state);
                }
        }
        return 0;
}

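/*
 * callbacks into the owning inode's tree ops (when present) as bits are
 * set or cleared on an extent_state, used by the filesystem for things
 * like per-bit accounting.
 */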
static void set_state_cb(struct extent_io_tree *tree,
                         struct extent_state *state,
                         unsigned long bits)
{
        if (tree->ops && tree->ops->set_bit_hook) {
                tree->ops->set_bit_hook(tree->mapping->host, state->start,
                                        state->end, state->state, bits);
        }
}

static void clear_state_cb(struct extent_io_tree *tree,
                           struct extent_state *state,
                           unsigned long bits)
{
        if (tree->ops && tree->ops->clear_bit_hook) {
                tree->ops->clear_bit_hook(tree->mapping->host, state->start,
                                          state->end, state->state, bits);
        }
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        int bits)
{
        struct rb_node *node;

        if (end < start) {
                printk("end < start %Lu %Lu\n", end, start);
                WARN_ON(1);
        }
        if (bits & EXTENT_DIRTY)
                tree->dirty_bytes += end - start + 1;
        set_state_cb(tree, state, bits);
        state->state |= bits;
        state->start = start;
        state->end = end;
        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
                free_extent_state(state);
                return -EEXIST;
        }
        state->tree = tree;
        merge_state(tree, state);
        return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
                       struct extent_state *prealloc, u64 split)
{
        struct rb_node *node;
        prealloc->start = orig->start;
        prealloc->end = split - 1;
        prealloc->state = orig->state;
        orig->start = split;

        node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
        if (node) {
                struct extent_state *found;
                found = rb_entry(node, struct extent_state, rb_node);
                printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
                free_extent_state(prealloc);
                return -EEXIST;
        }
        prealloc->tree = tree;
        return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
                            struct extent_state *state, int bits, int wake,
                            int delete)
{
        int ret = state->state & bits;

        if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                WARN_ON(range > tree->dirty_bytes);
                tree->dirty_bytes -= range;
        }
        clear_state_cb(tree, state, bits);
        state->state &= ~bits;
        if (wake)
                wake_up(&state->wq);
        if (delete || state->state == 0) {
                if (state->tree) {
                        clear_state_cb(tree, state, state->state);
                        rb_erase(&state->rb_node, &tree->state);
                        state->tree = NULL;
                        free_extent_state(state);
                } else {
                        WARN_ON(1);
                }
        } else {
                merge_state(tree, state);
        }
        return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                     int bits, int wake, int delete, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err;
        int set = 0;

again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find the extents that end after
         * our range starts
         */
        node = tree_search(tree, start);
        if (!node)
                goto out;
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start > end)
                goto out;
        WARN_ON(state->end < start);

        /*
         *     | ---- desired range ---- |
         *  | state | or
         *  | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip
         * bits on second half.
         *
         * If the extent we found extends past our range, we
         * just split and search again.  It'll get split again
         * the next time though.
         *
         * If the extent we found is inside our range, we clear
         * the desired bit on it.
         */

        if (state->start < start) {
                if (!prealloc)
                        prealloc = alloc_extent_state(GFP_ATOMIC);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        start = state->end + 1;
                        set |= clear_state_bit(tree, state, bits,
                                        wake, delete);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and clear the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                if (!prealloc)
                        prealloc = alloc_extent_state(GFP_ATOMIC);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                if (wake)
                        wake_up(&state->wq);
                set |= clear_state_bit(tree, prealloc, bits,
                                       wake, delete);
                prealloc = NULL;
                goto out;
        }

        start = state->end + 1;
        set |= clear_state_bit(tree, state, bits, wake, delete);
        goto search_again;

out:
        spin_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return set;

search_again:
        if (start > end)
                goto out;
        spin_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

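/*
 * wait for a single extent_state to change.  The tree lock is dropped
 * while we sleep and retaken before returning.
 */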
static int wait_on_state(struct extent_io_tree *tree,
                         struct extent_state *state)
{
        DEFINE_WAIT(wait);
        prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock_irq(&tree->lock);
        schedule();
        spin_lock_irq(&tree->lock);
        finish_wait(&state->wq, &wait);
        return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
        struct extent_state *state;
        struct rb_node *node;

        spin_lock_irq(&tree->lock);
again:
        while (1) {
                /*
                 * this search will find all the extents that end after
                 * our range starts
                 */
                node = tree_search(tree, start);
                if (!node)
                        break;

                state = rb_entry(node, struct extent_state, rb_node);

                if (state->start > end)
                        goto out;

                if (state->state & bits) {
                        start = state->start;
                        atomic_inc(&state->refs);
                        wait_on_state(tree, state);
                        free_extent_state(state);
                        goto again;
                }
                start = state->end + 1;

                if (start > end)
                        break;

                if (need_resched()) {
                        spin_unlock_irq(&tree->lock);
                        cond_resched();
                        spin_lock_irq(&tree->lock);
                }
        }
out:
        spin_unlock_irq(&tree->lock);
        return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

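/*
 * set some bits on an extent_state struct, updating the dirty byte
 * accounting and firing the set_bit hook.
 */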
static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
                           int bits)
{
        if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                tree->dirty_bytes += range;
        }
        set_state_cb(tree, state, bits);
        state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
                   int exclusive, u64 *failed_start, gfp_t mask)
{
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        unsigned long flags;
        int err = 0;
        int set;
        u64 last_start;
        u64 last_end;
again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
                if (!prealloc)
                        return -ENOMEM;
        }

        spin_lock_irqsave(&tree->lock, flags);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                err = insert_state(tree, prealloc, start, end, bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                goto out;
        }

        state = rb_entry(node, struct extent_state, rb_node);
        last_start = state->start;
        last_end = state->end;

        /*
         * | ---- desired range ---- |
         * | state |
         *
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
                set = state->state & bits;
                if (set && exclusive) {
                        *failed_start = state->start;
                        err = -EEXIST;
                        goto out;
                }
                set_state_bits(tree, state, bits);
                start = state->end + 1;
                merge_state(tree, state);
                goto search_again;
        }

        /*
         *     | ---- desired range ---- |
         * | state |
         *   or
         * | ------------- state -------------- |
         *
         * We need to split the extent we found, and may flip bits on
         * second half.
         *
         * If the extent we found extends past our
         * range, we just split and search again.  It'll get split
         * again the next time though.
         *
         * If the extent we found is inside our range, we set the
         * desired bit on it.
         */
        if (state->start < start) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
                if (err)
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, bits);
                        start = state->end + 1;
                        merge_state(tree, state);
                } else {
                        start = state->start;
                }
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *     | state | or               | state |
         *
         * There's a hole, we need to insert something in it and
         * ignore the extent we found.
         */
        if (state->start > start) {
                u64 this_end;
                if (end < last_start)
                        this_end = end;
                else
                        this_end = last_start -1;
                err = insert_state(tree, prealloc, start, this_end,
                                   bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
                if (err)
                        goto out;
                start = this_end + 1;
                goto search_again;
        }
        /*
         * | ---- desired range ---- |
         *                        | state |
         * We need to split the extent, and set the bit
         * on the first half
         */
        if (state->start <= end && state->end > end) {
                set = state->state & bits;
                if (exclusive && set) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
                }
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);

                set_state_bits(tree, prealloc, bits);
                merge_state(tree, prealloc);
                prealloc = NULL;
                goto out;
        }

        goto search_again;

out:
        spin_unlock_irqrestore(&tree->lock, flags);
        if (prealloc)
                free_extent_state(prealloc);

        return err;

search_again:
        if (start > end)
                goto out;
        spin_unlock_irqrestore(&tree->lock, flags);
        if (mask & __GFP_WAIT)
                cond_resched();
        goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_ordered);

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                    int bits, gfp_t mask)
{
        return set_extent_bit(tree, start, end, bits, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
                      int bits, gfp_t mask)
{
        return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end,
                              EXTENT_DELALLOC | EXTENT_DIRTY,
                              0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end,
                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
                         gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_ordered);

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
                              mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                          gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
                         gfp_t mask)
{
        return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
                              0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
                           gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * either insert or lock state struct between start and end; use mask to tell
 * us if waiting is desired.
 */
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
        int err;
        u64 failed_start;
        while (1) {
                err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
                                     &failed_start, mask);
                if (err == -EEXIST && (mask & __GFP_WAIT)) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
                } else {
                        break;
                }
                WARN_ON(start > end);
        }
        return err;
}
EXPORT_SYMBOL(lock_extent);

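/*
 * non-blocking version of lock_extent: returns 1 if the whole range was
 * locked, or 0 (after undoing any partial lock) when part of the range
 * was already locked.
 */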
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                    gfp_t mask)
{
        int err;
        u64 failed_start;

        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
                             &failed_start, mask);
        if (err == -EEXIST) {
                if (failed_start > start)
                        clear_extent_bit(tree, start, failed_start - 1,
                                         EXTENT_LOCKED, 1, 0, mask);
                return 0;
        }
        return 1;
}
EXPORT_SYMBOL(try_lock_extent);

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                  gfp_t mask)
{
        return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                __set_page_dirty_nobuffers(page);
                page_cache_release(page);
                index++;
        }
        set_extent_dirty(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                BUG_ON(!page);
                set_page_writeback(page);
                page_cache_release(page);
                index++;
        }
        set_extent_writeback(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(set_range_writeback);

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 1;

        spin_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                goto out;
        }

        while(1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits)) {
                        *start_ret = state->start;
                        *end_ret = state->end;
                        ret = 0;
                        break;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        spin_unlock_irq(&tree->lock);
        return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
                                                 u64 start, int bits)
{
        struct rb_node *node;
        struct extent_state *state;

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                goto out;
        }

        while(1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->end >= start && (state->state & bits)) {
                        return state;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        return NULL;
}
EXPORT_SYMBOL(find_first_extent_bit_state);

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
                                        u64 *start, u64 *end, u64 max_bytes)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 found = 0;
        u64 total_bytes = 0;

        spin_lock_irq(&tree->lock);

        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node) {
                if (!found)
                        *end = (u64)-1;
                goto out;
        }

        while(1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (found && (state->start != cur_start ||
                              (state->state & EXTENT_BOUNDARY))) {
                        goto out;
                }
                if (!(state->state & EXTENT_DELALLOC)) {
                        if (!found)
                                *end = state->end;
                        goto out;
                }
                if (!found)
                        *start = state->start;
                found++;
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
                if (!node)
                        break;
                total_bytes += state->end - state->start + 1;
                if (total_bytes >= max_bytes)
                        break;
        }
out:
        spin_unlock_irq(&tree->lock);
        return found;
}

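/*
 * helper to unlock all the pages in a delalloc range, skipping
 * locked_page, which the caller is responsible for.
 */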
static noinline int __unlock_for_delalloc(struct inode *inode,
                                          struct page *locked_page,
                                          u64 start, u64 end)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;

        if (index == locked_page->index && end_index == index)
                return 0;

        while(nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min(nr_pages, ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {
                        if (pages[i] != locked_page)
                                unlock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
        return 0;
}

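/*
 * lock every page in the range except locked_page, which the caller has
 * already locked.  Returns -EAGAIN (after unlocking anything we managed
 * to lock) if some of the pages are no longer present in the mapping.
 */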
static noinline int lock_delalloc_pages(struct inode *inode,
                                        struct page *locked_page,
                                        u64 delalloc_start,
                                        u64 delalloc_end)
{
        unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
        unsigned long start_index = index;
        unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
        unsigned long pages_locked = 0;
        struct page *pages[16];
        unsigned long nrpages;
        int ret;
        int i;

        /* the caller is responsible for locking the start index */
        if (index == locked_page->index && index == end_index)
                return 0;

        /* skip the page at the start index */
        nrpages = end_index - index + 1;
        while(nrpages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min(nrpages, ARRAY_SIZE(pages)), pages);
                if (ret == 0) {
                        ret = -EAGAIN;
                        goto done;
                }
                /* now we have an array of pages, lock them all */
                for (i = 0; i < ret; i++) {
                        /*
                         * the caller is taking responsibility for
                         * locked_page
                         */
                        if (pages[i] != locked_page)
                                lock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                pages_locked += ret;
                nrpages -= ret;
                index += ret;
                cond_resched();
        }
        ret = 0;
done:
        if (ret && pages_locked) {
                __unlock_for_delalloc(inode, locked_page,
                              delalloc_start,
                              ((u64)(start_index + pages_locked - 1)) <<
                               PAGE_CACHE_SHIFT);
        }
        return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
                                             struct extent_io_tree *tree,
                                             struct page *locked_page,
                                             u64 *start, u64 *end,
                                             u64 max_bytes)
{
        u64 delalloc_start;
        u64 delalloc_end;
        u64 found;
        int ret;
        int loops = 0;

again:
        /* step one, find a bunch of delalloc bytes starting at start */
        delalloc_start = *start;
        delalloc_end = 0;
        found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
                                    max_bytes);
        if (!found) {
                *start = delalloc_start;
                *end = delalloc_end;
                return found;
        }

        /*
         * make sure to limit the number of pages we try to lock down
         * if we're looping.
         */
        if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
                delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) &
                        ~((u64)PAGE_CACHE_SIZE - 1);
        }
        /* step two, lock all the pages after the page that has start */
        ret = lock_delalloc_pages(inode, locked_page,
                                  delalloc_start, delalloc_end);
        if (ret == -EAGAIN) {
                /* some of the pages are gone, lets avoid looping by
                 * shortening the size of the delalloc range we're searching
                 */
                if (!loops) {
                        unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
                        max_bytes = PAGE_CACHE_SIZE - offset;
                        loops = 1;
                        goto again;
                } else {
                        found = 0;
                        goto out_failed;
                }
        }
        BUG_ON(ret);

        /* step three, lock the state bits for the whole range */
        lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);

        /* then test to make sure it is all still delalloc */
        ret = test_range_bit(tree, delalloc_start, delalloc_end,
                             EXTENT_DELALLOC, 1);
        if (!ret) {
                unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
                __unlock_for_delalloc(inode, locked_page,
                              delalloc_start, delalloc_end);
                cond_resched();
                goto again;
        }
        *start = delalloc_start;
        *end = delalloc_end;
out_failed:
        return found;
}

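/*
 * helper for the writeback paths: clears the lock/delalloc (and
 * optionally dirty) bits on a range, then walks the pages to clear
 * dirty, start or end writeback, and unlock them as requested.
 */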
int extent_clear_unlock_delalloc(struct inode *inode,
                                 struct extent_io_tree *tree,
                                 u64 start, u64 end, struct page *locked_page,
                                 int clear_dirty, int set_writeback,
                                 int end_writeback)
{
        int ret;
        struct page *pages[16];
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;

        if (clear_dirty)
                clear_bits |= EXTENT_DIRTY;

        clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);

        while(nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min(nr_pages, ARRAY_SIZE(pages)), pages);
                for (i = 0; i < ret; i++) {
                        if (pages[i] == locked_page) {
                                page_cache_release(pages[i]);
                                continue;
                        }
                        if (clear_dirty)
                                clear_page_dirty_for_io(pages[i]);
                        if (set_writeback)
                                set_page_writeback(pages[i]);
                        if (end_writeback)
                                end_page_writeback(pages[i]);
                        unlock_page(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
        return 0;
}
EXPORT_SYMBOL(extent_clear_unlock_delalloc);

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end, u64 max_bytes,
                     unsigned long bits)
{
        struct rb_node *node;
        struct extent_state *state;
        u64 cur_start = *start;
        u64 total_bytes = 0;
        int found = 0;

        if (search_end <= cur_start) {
                printk("search_end %Lu start %Lu\n", search_end, cur_start);
                WARN_ON(1);
                return 0;
        }

        spin_lock_irq(&tree->lock);
        if (cur_start == 0 && bits == EXTENT_DIRTY) {
                total_bytes = tree->dirty_bytes;
                goto out;
        }
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, cur_start);
        if (!node) {
                goto out;
        }

        while(1) {
                state = rb_entry(node, struct extent_state, rb_node);
                if (state->start > search_end)
                        break;
                if (state->end >= cur_start && (state->state & bits)) {
                        total_bytes += min(search_end, state->end) + 1 -
                                       max(cur_start, state->start);
                        if (total_bytes >= max_bytes)
                                break;
                        if (!found) {
                                *start = state->start;
                                found = 1;
                        }
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        spin_unlock_irq(&tree->lock);
        return total_bytes;
}
/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;
        int err;

        while (index <= end_index) {
                page = grab_cache_page(tree->mapping, index);
                if (!page) {
                        err = -ENOMEM;
                        goto failed;
                }
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto failed;
                }
                index++;
        }
        lock_extent(tree, start, end, GFP_NOFS);
        return 0;

failed:
        /*
         * we failed above in getting the page at 'index', so we undo here
         * up to but not including the page at 'index'
         */
        end_index = index;
        index = start >> PAGE_CACHE_SHIFT;
        while (index < end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(tree->mapping, index);
                unlock_page(page);
                page_cache_release(page);
                index++;
        }
        unlock_extent(tree, start, end, GFP_NOFS);
        return 0;
}
EXPORT_SYMBOL(unlock_range);

/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
        struct rb_node *node;
        struct extent_state *state;
        int ret = 0;

        spin_lock_irq(&tree->lock);
        /*
         * this search will find all the extents that end after
         * our range starts.
         */
        node = tree_search(tree, start);
        if (!node) {
                ret = -ENOENT;
                goto out;
        }
        state = rb_entry(node, struct extent_state, rb_node);
        if (state->start != start) {
                ret = -ENOENT;
                goto out;
        }
        state->private = private;
out:
        spin_unlock_irq(&tree->lock);
        return ret;
}

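/*
 * read back the private field stored at the given byte offset.
 * Returns -ENOENT if no extent_state starts at 'start'.
 */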
1492int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1493{
1494 struct rb_node *node;
1495 struct extent_state *state;
1496 int ret = 0;
1497
Chris Mason70dec802008-01-29 09:59:12 -05001498 spin_lock_irq(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001499 /*
1500 * this search will find all the extents that end after
1501 * our range starts.
1502 */
Chris Mason80ea96b2008-02-01 14:51:59 -05001503 node = tree_search(tree, start);
Peter2b114d12008-04-01 11:21:40 -04001504 if (!node) {
Chris Masond1310b22008-01-24 16:13:08 -05001505 ret = -ENOENT;
1506 goto out;
1507 }
1508 state = rb_entry(node, struct extent_state, rb_node);
1509 if (state->start != start) {
1510 ret = -ENOENT;
1511 goto out;
1512 }
1513 *private = state->private;
1514out:
Chris Mason70dec802008-01-29 09:59:12 -05001515 spin_unlock_irq(&tree->lock);
Chris Masond1310b22008-01-24 16:13:08 -05001516 return ret;
1517}
1518
1519/*
1520 * searches a range in the state tree for a given mask.
Chris Mason70dec802008-01-29 09:59:12 -05001521 * If 'filled' == 1, this returns 1 only if every extent in the tree
Chris Masond1310b22008-01-24 16:13:08 -05001522 * has the bits set. Otherwise, 1 is returned if any bit in the
1523 * range is found set.
1524 */
1525int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1526 int bits, int filled)
1527{
1528 struct extent_state *state = NULL;
1529 struct rb_node *node;
1530 int bitset = 0;
1531 unsigned long flags;
1532
Chris Mason70dec802008-01-29 09:59:12 -05001533 spin_lock_irqsave(&tree->lock, flags);
Chris Mason80ea96b2008-02-01 14:51:59 -05001534 node = tree_search(tree, start);
Chris Masond1310b22008-01-24 16:13:08 -05001535 while (node && start <= end) {
1536 state = rb_entry(node, struct extent_state, rb_node);
1537
1538 if (filled && state->start > start) {
1539 bitset = 0;
1540 break;
1541 }
1542
1543 if (state->start > end)
1544 break;
1545
1546 if (state->state & bits) {
1547 bitset = 1;
1548 if (!filled)
1549 break;
1550 } else if (filled) {
1551 bitset = 0;
1552 break;
1553 }
1554 start = state->end + 1;
1555 if (start > end)
1556 break;
1557 node = rb_next(node);
1558 if (!node) {
1559 if (filled)
1560 bitset = 0;
1561 break;
1562 }
1563 }
Chris Mason70dec802008-01-29 09:59:12 -05001564 spin_unlock_irqrestore(&tree->lock, flags);
Chris Masond1310b22008-01-24 16:13:08 -05001565 return bitset;
1566}
1567EXPORT_SYMBOL(test_range_bit);
1568
1569/*
1570 * helper function to set a given page up to date if all the
1571 * extents in the tree for that page are up to date
1572 */
1573static int check_page_uptodate(struct extent_io_tree *tree,
1574 struct page *page)
1575{
1576 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1577 u64 end = start + PAGE_CACHE_SIZE - 1;
1578 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1579 SetPageUptodate(page);
1580 return 0;
1581}
1582
1583/*
1584 * helper function to unlock a page if all the extents in the tree
1585 * for that page are unlocked
1586 */
1587static int check_page_locked(struct extent_io_tree *tree,
1588 struct page *page)
1589{
1590 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1591 u64 end = start + PAGE_CACHE_SIZE - 1;
1592 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1593 unlock_page(page);
1594 return 0;
1595}
1596
1597/*
1598 * helper function to end page writeback if all the extents
1599 * in the tree for that page are done with writeback
1600 */
1601static int check_page_writeback(struct extent_io_tree *tree,
1602 struct page *page)
1603{
1604 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1605 u64 end = start + PAGE_CACHE_SIZE - 1;
1606 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1607 end_page_writeback(page);
1608 return 0;
1609}
1610
1611/* lots and lots of room for performance fixes in the end_bio funcs */
1612
1613/*
1614 * after a writepage IO is done, we need to:
1615 * clear the uptodate bits on error
1616 * clear the writeback bits in the extent tree for this IO
1617 * end_page_writeback if the page has no more pending IO
1618 *
1619 * Scheduling is not allowed, so the extent state tree is expected
1620 * to have one and only one object corresponding to this IO.
1621 */
Chris Masond1310b22008-01-24 16:13:08 -05001622static void end_bio_extent_writepage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05001623{
Chris Mason1259ab72008-05-12 13:39:03 -04001624 int uptodate = err == 0;
Chris Masond1310b22008-01-24 16:13:08 -05001625 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
David Woodhouse902b22f2008-08-20 08:51:49 -04001626 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05001627 u64 start;
1628 u64 end;
1629 int whole_page;
Chris Mason1259ab72008-05-12 13:39:03 -04001630 int ret;
Chris Masond1310b22008-01-24 16:13:08 -05001631
Chris Masond1310b22008-01-24 16:13:08 -05001632 do {
1633 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04001634 tree = &BTRFS_I(page->mapping->host)->io_tree;
1635
Chris Masond1310b22008-01-24 16:13:08 -05001636 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1637 bvec->bv_offset;
1638 end = start + bvec->bv_len - 1;
1639
1640 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1641 whole_page = 1;
1642 else
1643 whole_page = 0;
1644
1645 if (--bvec >= bio->bi_io_vec)
1646 prefetchw(&bvec->bv_page->flags);
Chris Mason1259ab72008-05-12 13:39:03 -04001647 if (tree->ops && tree->ops->writepage_end_io_hook) {
1648 ret = tree->ops->writepage_end_io_hook(page, start,
David Woodhouse902b22f2008-08-20 08:51:49 -04001649 end, NULL, uptodate);
Chris Mason1259ab72008-05-12 13:39:03 -04001650 if (ret)
1651 uptodate = 0;
1652 }
1653
1654 if (!uptodate && tree->ops &&
1655 tree->ops->writepage_io_failed_hook) {
1656 ret = tree->ops->writepage_io_failed_hook(bio, page,
David Woodhouse902b22f2008-08-20 08:51:49 -04001657 start, end, NULL);
Chris Mason1259ab72008-05-12 13:39:03 -04001658 if (ret == 0) {
Chris Mason1259ab72008-05-12 13:39:03 -04001659 uptodate = (err == 0);
1660 continue;
1661 }
1662 }
1663
Chris Masond1310b22008-01-24 16:13:08 -05001664 if (!uptodate) {
1665 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1666 ClearPageUptodate(page);
1667 SetPageError(page);
1668 }
Chris Mason70dec802008-01-29 09:59:12 -05001669
David Woodhouse902b22f2008-08-20 08:51:49 -04001670 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
Chris Masond1310b22008-01-24 16:13:08 -05001671
1672 if (whole_page)
1673 end_page_writeback(page);
1674 else
1675 check_page_writeback(tree, page);
Chris Masond1310b22008-01-24 16:13:08 -05001676 } while (bvec >= bio->bi_io_vec);
Chris Mason2b1f55b2008-09-24 11:48:04 -04001677
Chris Masond1310b22008-01-24 16:13:08 -05001678 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05001679}
1680
1681/*
1682 * after a readpage IO is done, we need to:
1683 * clear the uptodate bits on error
1684 * set the uptodate bits if things worked
1685 * set the page up to date if all extents in the tree are uptodate
1686 * clear the lock bit in the extent tree
1687 * unlock the page if there are no other extents locked for it
1688 *
1689 * Scheduling is not allowed, so the extent state tree is expected
1690 * to have one and only one object corresponding to this IO.
1691 */
Chris Masond1310b22008-01-24 16:13:08 -05001692static void end_bio_extent_readpage(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05001693{
1694 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1695 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
David Woodhouse902b22f2008-08-20 08:51:49 -04001696 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05001697 u64 start;
1698 u64 end;
1699 int whole_page;
1700 int ret;
1701
Chris Masond1310b22008-01-24 16:13:08 -05001702 do {
1703 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04001704 tree = &BTRFS_I(page->mapping->host)->io_tree;
1705
Chris Masond1310b22008-01-24 16:13:08 -05001706 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1707 bvec->bv_offset;
1708 end = start + bvec->bv_len - 1;
1709
1710 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1711 whole_page = 1;
1712 else
1713 whole_page = 0;
1714
1715 if (--bvec >= bio->bi_io_vec)
1716 prefetchw(&bvec->bv_page->flags);
1717
1718 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
Chris Mason70dec802008-01-29 09:59:12 -05001719 ret = tree->ops->readpage_end_io_hook(page, start, end,
David Woodhouse902b22f2008-08-20 08:51:49 -04001720 NULL);
Chris Masond1310b22008-01-24 16:13:08 -05001721 if (ret)
1722 uptodate = 0;
1723 }
Chris Mason7e383262008-04-09 16:28:12 -04001724 if (!uptodate && tree->ops &&
1725 tree->ops->readpage_io_failed_hook) {
1726 ret = tree->ops->readpage_io_failed_hook(bio, page,
David Woodhouse902b22f2008-08-20 08:51:49 -04001727 start, end, NULL);
Chris Mason7e383262008-04-09 16:28:12 -04001728 if (ret == 0) {
Chris Mason3b951512008-04-17 11:29:12 -04001729 uptodate =
1730 test_bit(BIO_UPTODATE, &bio->bi_flags);
Chris Mason7e383262008-04-09 16:28:12 -04001731 continue;
1732 }
1733 }
Chris Mason70dec802008-01-29 09:59:12 -05001734
David Woodhouse902b22f2008-08-20 08:51:49 -04001735 if (uptodate)
1736 set_extent_uptodate(tree, start, end,
1737 GFP_ATOMIC);
1738 unlock_extent(tree, start, end, GFP_ATOMIC);
Chris Masond1310b22008-01-24 16:13:08 -05001739
Chris Mason70dec802008-01-29 09:59:12 -05001740 if (whole_page) {
1741 if (uptodate) {
1742 SetPageUptodate(page);
1743 } else {
1744 ClearPageUptodate(page);
1745 SetPageError(page);
1746 }
Chris Masond1310b22008-01-24 16:13:08 -05001747 unlock_page(page);
Chris Mason70dec802008-01-29 09:59:12 -05001748 } else {
1749 if (uptodate) {
1750 check_page_uptodate(tree, page);
1751 } else {
1752 ClearPageUptodate(page);
1753 SetPageError(page);
1754 }
Chris Masond1310b22008-01-24 16:13:08 -05001755 check_page_locked(tree, page);
Chris Mason70dec802008-01-29 09:59:12 -05001756 }
Chris Masond1310b22008-01-24 16:13:08 -05001757 } while (bvec >= bio->bi_io_vec);
1758
1759 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05001760}
1761
1762/*
1763 * IO done from prepare_write is pretty simple: we just unlock
1764 * the structs in the extent tree when done, and set the uptodate bits
1765 * as appropriate.
1766 */
Chris Masond1310b22008-01-24 16:13:08 -05001767static void end_bio_extent_preparewrite(struct bio *bio, int err)
Chris Masond1310b22008-01-24 16:13:08 -05001768{
1769 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1770 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
David Woodhouse902b22f2008-08-20 08:51:49 -04001771 struct extent_io_tree *tree;
Chris Masond1310b22008-01-24 16:13:08 -05001772 u64 start;
1773 u64 end;
1774
Chris Masond1310b22008-01-24 16:13:08 -05001775 do {
1776 struct page *page = bvec->bv_page;
David Woodhouse902b22f2008-08-20 08:51:49 -04001777 tree = &BTRFS_I(page->mapping->host)->io_tree;
1778
Chris Masond1310b22008-01-24 16:13:08 -05001779 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1780 bvec->bv_offset;
1781 end = start + bvec->bv_len - 1;
1782
1783 if (--bvec >= bio->bi_io_vec)
1784 prefetchw(&bvec->bv_page->flags);
1785
1786 if (uptodate) {
1787 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1788 } else {
1789 ClearPageUptodate(page);
1790 SetPageError(page);
1791 }
1792
1793 unlock_extent(tree, start, end, GFP_ATOMIC);
1794
1795 } while (bvec >= bio->bi_io_vec);
1796
1797 bio_put(bio);
Chris Masond1310b22008-01-24 16:13:08 -05001798}
1799
1800static struct bio *
1801extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1802 gfp_t gfp_flags)
1803{
1804 struct bio *bio;
1805
1806 bio = bio_alloc(gfp_flags, nr_vecs);
1807
1808 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1809 while (!bio && (nr_vecs /= 2))
1810 bio = bio_alloc(gfp_flags, nr_vecs);
1811 }
1812
1813 if (bio) {
Chris Masone1c4b742008-04-22 13:26:46 -04001814 bio->bi_size = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001815 bio->bi_bdev = bdev;
1816 bio->bi_sector = first_sector;
1817 }
1818 return bio;
1819}
1820
Chris Masonc8b97812008-10-29 14:49:59 -04001821static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1822 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05001823{
Chris Masond1310b22008-01-24 16:13:08 -05001824 int ret = 0;
Chris Mason70dec802008-01-29 09:59:12 -05001825 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1826 struct page *page = bvec->bv_page;
1827 struct extent_io_tree *tree = bio->bi_private;
Chris Mason70dec802008-01-29 09:59:12 -05001828 u64 start;
1829 u64 end;
1830
1831 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1832 end = start + bvec->bv_len - 1;
1833
David Woodhouse902b22f2008-08-20 08:51:49 -04001834 bio->bi_private = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05001835
1836 bio_get(bio);
1837
Chris Mason065631f2008-02-20 12:07:25 -05001838 if (tree->ops && tree->ops->submit_bio_hook)
Chris Masonf1885912008-04-09 16:28:12 -04001839 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
Chris Masonc8b97812008-10-29 14:49:59 -04001840 mirror_num, bio_flags);
Chris Mason0b86a832008-03-24 15:01:56 -04001841 else
1842 submit_bio(rw, bio);
Chris Masond1310b22008-01-24 16:13:08 -05001843 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1844 ret = -EOPNOTSUPP;
1845 bio_put(bio);
1846 return ret;
1847}
1848
1849static int submit_extent_page(int rw, struct extent_io_tree *tree,
1850 struct page *page, sector_t sector,
1851 size_t size, unsigned long offset,
1852 struct block_device *bdev,
1853 struct bio **bio_ret,
1854 unsigned long max_pages,
Chris Masonf1885912008-04-09 16:28:12 -04001855 bio_end_io_t end_io_func,
Chris Masonc8b97812008-10-29 14:49:59 -04001856 int mirror_num,
1857 unsigned long prev_bio_flags,
1858 unsigned long bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05001859{
1860 int ret = 0;
1861 struct bio *bio;
1862 int nr;
Chris Masonc8b97812008-10-29 14:49:59 -04001863 int contig = 0;
1864 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1865 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1866 size_t page_size = min(size, PAGE_CACHE_SIZE);
Chris Masond1310b22008-01-24 16:13:08 -05001867
1868 if (bio_ret && *bio_ret) {
1869 bio = *bio_ret;
Chris Masonc8b97812008-10-29 14:49:59 -04001870 if (old_compressed)
1871 contig = bio->bi_sector == sector;
1872 else
1873 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1874 sector;
1875
1876 if (prev_bio_flags != bio_flags || !contig ||
Chris Mason239b14b2008-03-24 15:02:07 -04001877 (tree->ops && tree->ops->merge_bio_hook &&
Chris Masonc8b97812008-10-29 14:49:59 -04001878 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1879 bio_flags)) ||
1880 bio_add_page(bio, page, page_size, offset) < page_size) {
1881 ret = submit_one_bio(rw, bio, mirror_num,
1882 prev_bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05001883 bio = NULL;
1884 } else {
1885 return 0;
1886 }
1887 }
Chris Masonc8b97812008-10-29 14:49:59 -04001888 if (this_compressed)
1889 nr = BIO_MAX_PAGES;
1890 else
1891 nr = bio_get_nr_vecs(bdev);
1892
Chris Masond1310b22008-01-24 16:13:08 -05001893 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1894 if (!bio) {
1895 printk(KERN_ERR "failed to allocate bio nr %d\n", nr);
 if (bio_ret)
 *bio_ret = NULL;
 return -ENOMEM;
1896 }
Chris Mason70dec802008-01-29 09:59:12 -05001897
Chris Masonc8b97812008-10-29 14:49:59 -04001898 bio_add_page(bio, page, page_size, offset);
Chris Masond1310b22008-01-24 16:13:08 -05001899 bio->bi_end_io = end_io_func;
1900 bio->bi_private = tree;
Chris Mason70dec802008-01-29 09:59:12 -05001901
Chris Masond1310b22008-01-24 16:13:08 -05001902 if (bio_ret) {
1903 *bio_ret = bio;
1904 } else {
Chris Masonc8b97812008-10-29 14:49:59 -04001905 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05001906 }
1907
1908 return ret;
1909}
1910
1911void set_page_extent_mapped(struct page *page)
1912{
1913 if (!PagePrivate(page)) {
1914 SetPagePrivate(page);
Chris Masond1310b22008-01-24 16:13:08 -05001915 page_cache_get(page);
Chris Mason6af118ce2008-07-22 11:18:07 -04001916 set_page_private(page, EXTENT_PAGE_PRIVATE);
Chris Masond1310b22008-01-24 16:13:08 -05001917 }
1918}
1919
1920void set_page_extent_head(struct page *page, unsigned long len)
1921{
1922 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1923}
1924
1925/*
1926 * basic readpage implementation. Locked extent state structs are inserted
1927 * into the tree and are removed again when the IO is done (by the end_io
1928 * handlers)
1929 */
1930static int __extent_read_full_page(struct extent_io_tree *tree,
1931 struct page *page,
1932 get_extent_t *get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04001933 struct bio **bio, int mirror_num,
1934 unsigned long *bio_flags)
Chris Masond1310b22008-01-24 16:13:08 -05001935{
1936 struct inode *inode = page->mapping->host;
1937 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1938 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1939 u64 end;
1940 u64 cur = start;
1941 u64 extent_offset;
1942 u64 last_byte = i_size_read(inode);
1943 u64 block_start;
1944 u64 cur_end;
1945 sector_t sector;
1946 struct extent_map *em;
1947 struct block_device *bdev;
1948 int ret;
1949 int nr = 0;
1950 size_t page_offset = 0;
1951 size_t iosize;
Chris Masonc8b97812008-10-29 14:49:59 -04001952 size_t disk_io_size;
Chris Masond1310b22008-01-24 16:13:08 -05001953 size_t blocksize = inode->i_sb->s_blocksize;
Chris Masonc8b97812008-10-29 14:49:59 -04001954 unsigned long this_bio_flag = 0;
Chris Masond1310b22008-01-24 16:13:08 -05001955
1956 set_page_extent_mapped(page);
1957
1958 end = page_end;
1959 lock_extent(tree, start, end, GFP_NOFS);
1960
Chris Masonc8b97812008-10-29 14:49:59 -04001961 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1962 char *userpage;
1963 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1964
1965 if (zero_offset) {
1966 iosize = PAGE_CACHE_SIZE - zero_offset;
1967 userpage = kmap_atomic(page, KM_USER0);
1968 memset(userpage + zero_offset, 0, iosize);
1969 flush_dcache_page(page);
1970 kunmap_atomic(userpage, KM_USER0);
1971 }
1972 }
Chris Masond1310b22008-01-24 16:13:08 -05001973 while (cur <= end) {
1974 if (cur >= last_byte) {
1975 char *userpage;
1976 iosize = PAGE_CACHE_SIZE - page_offset;
1977 userpage = kmap_atomic(page, KM_USER0);
1978 memset(userpage + page_offset, 0, iosize);
1979 flush_dcache_page(page);
1980 kunmap_atomic(userpage, KM_USER0);
1981 set_extent_uptodate(tree, cur, cur + iosize - 1,
1982 GFP_NOFS);
1983 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1984 break;
1985 }
1986 em = get_extent(inode, page, page_offset, cur,
1987 end - cur + 1, 0);
1988 if (IS_ERR(em) || !em) {
1989 SetPageError(page);
1990 unlock_extent(tree, cur, end, GFP_NOFS);
1991 break;
1992 }
Chris Masond1310b22008-01-24 16:13:08 -05001993 extent_offset = cur - em->start;
Chris Masone6dcd2d2008-07-17 12:53:50 -04001994 if (extent_map_end(em) <= cur) {
1995 printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
1996 }
Chris Masond1310b22008-01-24 16:13:08 -05001997 BUG_ON(extent_map_end(em) <= cur);
Chris Masone6dcd2d2008-07-17 12:53:50 -04001998 if (end < cur) {
1999 printk("2bad mapping end %Lu cur %Lu\n", end, cur);
2000 }
Chris Masond1310b22008-01-24 16:13:08 -05002001 BUG_ON(end < cur);
2002
Chris Masonc8b97812008-10-29 14:49:59 -04002003 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2004 this_bio_flag = EXTENT_BIO_COMPRESSED;
2005
Chris Masond1310b22008-01-24 16:13:08 -05002006 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2007 cur_end = min(extent_map_end(em) - 1, end);
2008 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002009 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2010 disk_io_size = em->block_len;
2011 sector = em->block_start >> 9;
2012 } else {
2013 sector = (em->block_start + extent_offset) >> 9;
2014 disk_io_size = iosize;
2015 }
Chris Masond1310b22008-01-24 16:13:08 -05002016 bdev = em->bdev;
2017 block_start = em->block_start;
2018 free_extent_map(em);
2019 em = NULL;
2020
2021 /* we've found a hole, just zero and go on */
2022 if (block_start == EXTENT_MAP_HOLE) {
2023 char *userpage;
2024 userpage = kmap_atomic(page, KM_USER0);
2025 memset(userpage + page_offset, 0, iosize);
2026 flush_dcache_page(page);
2027 kunmap_atomic(userpage, KM_USER0);
2028
2029 set_extent_uptodate(tree, cur, cur + iosize - 1,
2030 GFP_NOFS);
2031 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2032 cur = cur + iosize;
2033 page_offset += iosize;
2034 continue;
2035 }
2036 /* the get_extent function already copied into the page */
2037 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
Chris Masona1b32a52008-09-05 16:09:51 -04002038 check_page_uptodate(tree, page);
Chris Masond1310b22008-01-24 16:13:08 -05002039 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2040 cur = cur + iosize;
2041 page_offset += iosize;
2042 continue;
2043 }
Chris Mason70dec802008-01-29 09:59:12 -05002044 /* we have an inline extent but it didn't get marked
2045 * uptodate. Error out
2046 */
2047 if (block_start == EXTENT_MAP_INLINE) {
2048 SetPageError(page);
2049 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2050 cur = cur + iosize;
2051 page_offset += iosize;
2052 continue;
2053 }
Chris Masond1310b22008-01-24 16:13:08 -05002054
2055 ret = 0;
2056 if (tree->ops && tree->ops->readpage_io_hook) {
2057 ret = tree->ops->readpage_io_hook(page, cur,
2058 cur + iosize - 1);
2059 }
2060 if (!ret) {
Chris Mason89642222008-07-24 09:41:53 -04002061 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2062 pnr -= page->index;
Chris Masond1310b22008-01-24 16:13:08 -05002063 ret = submit_extent_page(READ, tree, page,
Chris Masonc8b97812008-10-29 14:49:59 -04002064 sector, disk_io_size, page_offset,
Chris Mason89642222008-07-24 09:41:53 -04002065 bdev, bio, pnr,
Chris Masonc8b97812008-10-29 14:49:59 -04002066 end_bio_extent_readpage, mirror_num,
2067 *bio_flags,
2068 this_bio_flag);
Chris Mason89642222008-07-24 09:41:53 -04002069 nr++;
Chris Masonc8b97812008-10-29 14:49:59 -04002070 *bio_flags = this_bio_flag;
Chris Masond1310b22008-01-24 16:13:08 -05002071 }
2072 if (ret)
2073 SetPageError(page);
2074 cur = cur + iosize;
2075 page_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002076 }
2077 if (!nr) {
2078 if (!PageError(page))
2079 SetPageUptodate(page);
2080 unlock_page(page);
2081 }
2082 return 0;
2083}
2084
2085int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2086 get_extent_t *get_extent)
2087{
2088 struct bio *bio = NULL;
Chris Masonc8b97812008-10-29 14:49:59 -04002089 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002090 int ret;
2091
Chris Masonc8b97812008-10-29 14:49:59 -04002092 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2093 &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002094 if (bio)
Chris Masonc8b97812008-10-29 14:49:59 -04002095 submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002096 return ret;
2097}
2098EXPORT_SYMBOL(extent_read_full_page);
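
/*
 * example usage (a sketch with a made-up example_readpage name): a
 * filesystem's ->readpage hook only has to look up its io tree and hand
 * the page off together with its get_extent callback.  btrfs wires this
 * up with btrfs_get_extent, roughly:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_read_full_page(tree, page, btrfs_get_extent);
 *	}
 */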
2099
2100/*
2101 * the writepage semantics are similar to regular writepage. extent
2102 * records are inserted to lock ranges in the tree, and as dirty areas
2103 * are found, they are marked writeback. Then the lock bits are removed
2104 * and the end_io handler clears the writeback ranges
2105 */
2106static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2107 void *data)
2108{
2109 struct inode *inode = page->mapping->host;
2110 struct extent_page_data *epd = data;
2111 struct extent_io_tree *tree = epd->tree;
2112 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2113 u64 delalloc_start;
2114 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2115 u64 end;
2116 u64 cur = start;
2117 u64 extent_offset;
2118 u64 last_byte = i_size_read(inode);
2119 u64 block_start;
2120 u64 iosize;
Chris Masone6dcd2d2008-07-17 12:53:50 -04002121 u64 unlock_start;
Chris Masond1310b22008-01-24 16:13:08 -05002122 sector_t sector;
2123 struct extent_map *em;
2124 struct block_device *bdev;
2125 int ret;
2126 int nr = 0;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002127 size_t pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002128 size_t blocksize;
2129 loff_t i_size = i_size_read(inode);
2130 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2131 u64 nr_delalloc;
2132 u64 delalloc_end;
Chris Masonc8b97812008-10-29 14:49:59 -04002133 int page_started;
2134 int compressed;
Chris Masond1310b22008-01-24 16:13:08 -05002135
2136 WARN_ON(!PageLocked(page));
Chris Mason7f3c74f2008-07-18 12:01:11 -04002137 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
Chris Mason211c17f2008-05-15 09:13:45 -04002138 if (page->index > end_index ||
Chris Mason7f3c74f2008-07-18 12:01:11 -04002139 (page->index == end_index && !pg_offset)) {
Chris Mason211c17f2008-05-15 09:13:45 -04002140 page->mapping->a_ops->invalidatepage(page, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002141 unlock_page(page);
2142 return 0;
2143 }
2144
2145 if (page->index == end_index) {
2146 char *userpage;
2147
Chris Masond1310b22008-01-24 16:13:08 -05002148 userpage = kmap_atomic(page, KM_USER0);
Chris Mason7f3c74f2008-07-18 12:01:11 -04002149 memset(userpage + pg_offset, 0,
2150 PAGE_CACHE_SIZE - pg_offset);
Chris Masond1310b22008-01-24 16:13:08 -05002151 kunmap_atomic(userpage, KM_USER0);
Chris Mason211c17f2008-05-15 09:13:45 -04002152 flush_dcache_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002153 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002154 pg_offset = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002155
2156 set_page_extent_mapped(page);
2157
2158 delalloc_start = start;
2159 delalloc_end = 0;
Chris Masonc8b97812008-10-29 14:49:59 -04002160 page_started = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002161 while(delalloc_end < page_end) {
Chris Masonc8b97812008-10-29 14:49:59 -04002162 nr_delalloc = find_lock_delalloc_range(inode, tree,
2163 page,
2164 &delalloc_start,
Chris Masond1310b22008-01-24 16:13:08 -05002165 &delalloc_end,
2166 128 * 1024 * 1024);
2167 if (nr_delalloc == 0) {
2168 delalloc_start = delalloc_end + 1;
2169 continue;
2170 }
Chris Masonc8b97812008-10-29 14:49:59 -04002171 tree->ops->fill_delalloc(inode, page, delalloc_start,
2172 delalloc_end, &page_started);
Chris Masond1310b22008-01-24 16:13:08 -05002173 delalloc_start = delalloc_end + 1;
2174 }
Chris Masonc8b97812008-10-29 14:49:59 -04002175
2176 /* did the fill delalloc function already unlock and start the IO? */
2177 if (page_started) {
2178 return 0;
2179 }
2180
Chris Masond1310b22008-01-24 16:13:08 -05002181 lock_extent(tree, start, page_end, GFP_NOFS);
Chris Masone6dcd2d2008-07-17 12:53:50 -04002182 unlock_start = start;
Chris Masond1310b22008-01-24 16:13:08 -05002183
Chris Mason247e7432008-07-17 12:53:51 -04002184 if (tree->ops && tree->ops->writepage_start_hook) {
Chris Masonc8b97812008-10-29 14:49:59 -04002185 ret = tree->ops->writepage_start_hook(page, start,
2186 page_end);
Chris Mason247e7432008-07-17 12:53:51 -04002187 if (ret == -EAGAIN) {
2188 unlock_extent(tree, start, page_end, GFP_NOFS);
2189 redirty_page_for_writepage(wbc, page);
2190 unlock_page(page);
2191 return 0;
2192 }
2193 }
2194
Chris Masond1310b22008-01-24 16:13:08 -05002195 end = page_end;
2196 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
2197 printk("found delalloc bits after lock_extent\n");
2198 }
2199
2200 if (last_byte <= start) {
2201 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
Chris Masone6dcd2d2008-07-17 12:53:50 -04002202 unlock_extent(tree, start, page_end, GFP_NOFS);
2203 if (tree->ops && tree->ops->writepage_end_io_hook)
2204 tree->ops->writepage_end_io_hook(page, start,
2205 page_end, NULL, 1);
2206 unlock_start = page_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05002207 goto done;
2208 }
2209
2210 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2211 blocksize = inode->i_sb->s_blocksize;
2212
2213 while (cur <= end) {
2214 if (cur >= last_byte) {
2215 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
Chris Masone6dcd2d2008-07-17 12:53:50 -04002216 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2217 if (tree->ops && tree->ops->writepage_end_io_hook)
2218 tree->ops->writepage_end_io_hook(page, cur,
2219 page_end, NULL, 1);
2220 unlock_start = page_end + 1;
Chris Masond1310b22008-01-24 16:13:08 -05002221 break;
2222 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002223 em = epd->get_extent(inode, page, pg_offset, cur,
Chris Masond1310b22008-01-24 16:13:08 -05002224 end - cur + 1, 1);
2225 if (IS_ERR(em) || !em) {
2226 SetPageError(page);
2227 break;
2228 }
2229
2230 extent_offset = cur - em->start;
2231 BUG_ON(extent_map_end(em) <= cur);
2232 BUG_ON(end < cur);
2233 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2234 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2235 sector = (em->block_start + extent_offset) >> 9;
2236 bdev = em->bdev;
2237 block_start = em->block_start;
Chris Masonc8b97812008-10-29 14:49:59 -04002238 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
Chris Masond1310b22008-01-24 16:13:08 -05002239 free_extent_map(em);
2240 em = NULL;
2241
Chris Masonc8b97812008-10-29 14:49:59 -04002242 /*
2243 * compressed and inline extents are written through other
2244 * paths in the FS
2245 */
2246 if (compressed || block_start == EXTENT_MAP_HOLE ||
Chris Masond1310b22008-01-24 16:13:08 -05002247 block_start == EXTENT_MAP_INLINE) {
2248 clear_extent_dirty(tree, cur,
2249 cur + iosize - 1, GFP_NOFS);
Chris Masone6dcd2d2008-07-17 12:53:50 -04002250
2251 unlock_extent(tree, unlock_start, cur + iosize -1,
2252 GFP_NOFS);
Chris Mason7f3c74f2008-07-18 12:01:11 -04002253
Chris Masonc8b97812008-10-29 14:49:59 -04002254 /*
2255 * end_io notification does not happen here for
2256 * compressed extents
2257 */
2258 if (!compressed && tree->ops &&
2259 tree->ops->writepage_end_io_hook)
Chris Masone6dcd2d2008-07-17 12:53:50 -04002260 tree->ops->writepage_end_io_hook(page, cur,
2261 cur + iosize - 1,
2262 NULL, 1);
Chris Masonc8b97812008-10-29 14:49:59 -04002263 else if (compressed) {
2264 /* we don't want to end_page_writeback on
2265 * a compressed extent. this happens
2266 * elsewhere
2267 */
2268 nr++;
2269 }
2270
2271 cur += iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002272 pg_offset += iosize;
Chris Masone6dcd2d2008-07-17 12:53:50 -04002273 unlock_start = cur;
Chris Masond1310b22008-01-24 16:13:08 -05002274 continue;
2275 }
Chris Masond1310b22008-01-24 16:13:08 -05002276 /* leave this out until we have a page_mkwrite call */
2277 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2278 EXTENT_DIRTY, 0)) {
2279 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002280 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002281 continue;
2282 }
Chris Masonc8b97812008-10-29 14:49:59 -04002283
Chris Masond1310b22008-01-24 16:13:08 -05002284 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2285 if (tree->ops && tree->ops->writepage_io_hook) {
2286 ret = tree->ops->writepage_io_hook(page, cur,
2287 cur + iosize - 1);
2288 } else {
2289 ret = 0;
2290 }
Chris Mason1259ab72008-05-12 13:39:03 -04002291 if (ret) {
Chris Masond1310b22008-01-24 16:13:08 -05002292 SetPageError(page);
Chris Mason1259ab72008-05-12 13:39:03 -04002293 } else {
Chris Masond1310b22008-01-24 16:13:08 -05002294 unsigned long max_nr = end_index + 1;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002295
Chris Masond1310b22008-01-24 16:13:08 -05002296 set_range_writeback(tree, cur, cur + iosize - 1);
2297 if (!PageWriteback(page)) {
2298 printk("warning page %lu not writeback, "
2299 "cur %llu end %llu\n", page->index,
2300 (unsigned long long)cur,
2301 (unsigned long long)end);
2302 }
2303
2304 ret = submit_extent_page(WRITE, tree, page, sector,
Chris Mason7f3c74f2008-07-18 12:01:11 -04002305 iosize, pg_offset, bdev,
Chris Masond1310b22008-01-24 16:13:08 -05002306 &epd->bio, max_nr,
Chris Masonc8b97812008-10-29 14:49:59 -04002307 end_bio_extent_writepage,
2308 0, 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002309 if (ret)
2310 SetPageError(page);
2311 }
2312 cur = cur + iosize;
Chris Mason7f3c74f2008-07-18 12:01:11 -04002313 pg_offset += iosize;
Chris Masond1310b22008-01-24 16:13:08 -05002314 nr++;
2315 }
2316done:
2317 if (nr == 0) {
2318 /* make sure the mapping tag for page dirty gets cleared */
2319 set_page_writeback(page);
2320 end_page_writeback(page);
2321 }
Chris Masone6dcd2d2008-07-17 12:53:50 -04002322 if (unlock_start <= page_end)
2323 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
Chris Masond1310b22008-01-24 16:13:08 -05002324 unlock_page(page);
2325 return 0;
2326}
2327
Chris Masond1310b22008-01-24 16:13:08 -05002328/**
Chris Mason4bef0842008-09-08 11:18:08 -04002329 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @tree: extent io tree of the address space; its ops supply the per-page lock hook
Chris Masond1310b22008-01-24 16:13:08 -05002330 * @mapping: address space structure to write
2331 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2332 * @writepage: function called for each page
2333 * @data: data passed to writepage function
2334 *
2335 * If a page is already under I/O, write_cache_pages() skips it, even
2336 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2337 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2338 * and msync() need to guarantee that all the data which was dirty at the time
2339 * the call was made get new I/O started against them. If wbc->sync_mode is
2340 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2341 * existing IO to complete.
2342 */
Chris Mason4bef0842008-09-08 11:18:08 -04002343int extent_write_cache_pages(struct extent_io_tree *tree,
2344 struct address_space *mapping,
2345 struct writeback_control *wbc,
2346 writepage_t writepage, void *data)
Chris Masond1310b22008-01-24 16:13:08 -05002347{
2348 struct backing_dev_info *bdi = mapping->backing_dev_info;
2349 int ret = 0;
2350 int done = 0;
2351 struct pagevec pvec;
2352 int nr_pages;
2353 pgoff_t index;
2354 pgoff_t end; /* Inclusive */
2355 int scanned = 0;
2356 int range_whole = 0;
2357
2358 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2359 wbc->encountered_congestion = 1;
2360 return 0;
2361 }
2362
2363 pagevec_init(&pvec, 0);
2364 if (wbc->range_cyclic) {
2365 index = mapping->writeback_index; /* Start from prev offset */
2366 end = -1;
2367 } else {
2368 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2369 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2370 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2371 range_whole = 1;
2372 scanned = 1;
2373 }
2374retry:
2375 while (!done && (index <= end) &&
2376 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2377 PAGECACHE_TAG_DIRTY,
2378 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2379 unsigned i;
2380
2381 scanned = 1;
2382 for (i = 0; i < nr_pages; i++) {
2383 struct page *page = pvec.pages[i];
2384
2385 /*
2386 * At this point we hold neither mapping->tree_lock nor
2387 * lock on the page itself: the page may be truncated or
2388 * invalidated (changing page->mapping to NULL), or even
2389 * swizzled back from swapper_space to tmpfs file
2390 * mapping
2391 */
Chris Mason4bef0842008-09-08 11:18:08 -04002392 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2393 tree->ops->write_cache_pages_lock_hook(page);
2394 else
2395 lock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002396
2397 if (unlikely(page->mapping != mapping)) {
2398 unlock_page(page);
2399 continue;
2400 }
2401
2402 if (!wbc->range_cyclic && page->index > end) {
2403 done = 1;
2404 unlock_page(page);
2405 continue;
2406 }
2407
2408 if (wbc->sync_mode != WB_SYNC_NONE)
2409 wait_on_page_writeback(page);
2410
2411 if (PageWriteback(page) ||
2412 !clear_page_dirty_for_io(page)) {
2413 unlock_page(page);
2414 continue;
2415 }
2416
2417 ret = (*writepage)(page, wbc, data);
2418
2419 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2420 unlock_page(page);
2421 ret = 0;
2422 }
2423 if (ret || (--(wbc->nr_to_write) <= 0))
2424 done = 1;
2425 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2426 wbc->encountered_congestion = 1;
2427 done = 1;
2428 }
2429 }
2430 pagevec_release(&pvec);
2431 cond_resched();
2432 }
2433 if (!scanned && !done) {
2434 /*
2435 * We hit the last page and there is more work to be done: wrap
2436 * back to the start of the file
2437 */
2438 scanned = 1;
2439 index = 0;
2440 goto retry;
2441 }
2442 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2443 mapping->writeback_index = index;
Chris Mason2b1f55b2008-09-24 11:48:04 -04002444
Chris Mason4bef0842008-09-08 11:18:08 -04002445 if (wbc->range_cont)
2446 wbc->range_start = (loff_t)index << PAGE_CACHE_SHIFT;
Chris Masond1310b22008-01-24 16:13:08 -05002447 return ret;
2448}
Chris Mason4bef0842008-09-08 11:18:08 -04002449EXPORT_SYMBOL(extent_write_cache_pages);
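
/*
 * example (a sketch; "tree" and "mapping" are assumed to be in scope):
 * a data-integrity caller has to ask for WB_SYNC_ALL so that pages
 * already under IO are waited on rather than skipped, as described in
 * the comment above.  Something along these lines:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.older_than_this = NULL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 *	extent_writepages(tree, mapping, btrfs_get_extent, &wbc);
 */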
Chris Masond1310b22008-01-24 16:13:08 -05002450
2451int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2452 get_extent_t *get_extent,
2453 struct writeback_control *wbc)
2454{
2455 int ret;
2456 struct address_space *mapping = page->mapping;
2457 struct extent_page_data epd = {
2458 .bio = NULL,
2459 .tree = tree,
2460 .get_extent = get_extent,
2461 };
2462 struct writeback_control wbc_writepages = {
2463 .bdi = wbc->bdi,
2464 .sync_mode = WB_SYNC_NONE,
2465 .older_than_this = NULL,
2466 .nr_to_write = 64,
2467 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2468 .range_end = (loff_t)-1,
2469 };
2470
2471
2472 ret = __extent_writepage(page, wbc, &epd);
2473
Chris Mason4bef0842008-09-08 11:18:08 -04002474 extent_write_cache_pages(tree, mapping, &wbc_writepages,
2475 __extent_writepage, &epd);
Chris Masond1310b22008-01-24 16:13:08 -05002476 if (epd.bio) {
Chris Masonc8b97812008-10-29 14:49:59 -04002477 submit_one_bio(WRITE, epd.bio, 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002478 }
2479 return ret;
2480}
2481EXPORT_SYMBOL(extent_write_full_page);
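
/*
 * example usage (a sketch with a made-up name): ->writepage for a
 * filesystem built on this code is a thin wrapper, much like btrfs:
 *
 *	static int example_writepage(struct page *page,
 *				     struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_write_full_page(tree, page, btrfs_get_extent,
 *					      wbc);
 *	}
 */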
2482
2483
2484int extent_writepages(struct extent_io_tree *tree,
2485 struct address_space *mapping,
2486 get_extent_t *get_extent,
2487 struct writeback_control *wbc)
2488{
2489 int ret = 0;
2490 struct extent_page_data epd = {
2491 .bio = NULL,
2492 .tree = tree,
2493 .get_extent = get_extent,
2494 };
2495
Chris Mason4bef0842008-09-08 11:18:08 -04002496 ret = extent_write_cache_pages(tree, mapping, wbc,
2497 __extent_writepage, &epd);
Chris Masond1310b22008-01-24 16:13:08 -05002498 if (epd.bio) {
Chris Masonc8b97812008-10-29 14:49:59 -04002499 submit_one_bio(WRITE, epd.bio, 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002500 }
2501 return ret;
2502}
2503EXPORT_SYMBOL(extent_writepages);
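
/*
 * example usage (a sketch with a made-up name): the ->writepages hook
 * looks much the same, taking the tree from the mapping's inode:
 *
 *	static int example_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(mapping->host)->io_tree;
 *		return extent_writepages(tree, mapping, btrfs_get_extent,
 *					 wbc);
 *	}
 */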
2504
2505int extent_readpages(struct extent_io_tree *tree,
2506 struct address_space *mapping,
2507 struct list_head *pages, unsigned nr_pages,
2508 get_extent_t get_extent)
2509{
2510 struct bio *bio = NULL;
2511 unsigned page_idx;
2512 struct pagevec pvec;
Chris Masonc8b97812008-10-29 14:49:59 -04002513 unsigned long bio_flags = 0;
Chris Masond1310b22008-01-24 16:13:08 -05002514
2515 pagevec_init(&pvec, 0);
2516 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2517 struct page *page = list_entry(pages->prev, struct page, lru);
2518
2519 prefetchw(&page->flags);
2520 list_del(&page->lru);
2521 /*
2522 * what we want to do here is call add_to_page_cache_lru,
2523 * but that isn't exported, so we reproduce it here
2524 */
2525 if (!add_to_page_cache(page, mapping,
2526 page->index, GFP_KERNEL)) {
2527
2528 /* open coding of lru_cache_add, also not exported */
2529 page_cache_get(page);
2530 if (!pagevec_add(&pvec, page))
2531 __pagevec_lru_add(&pvec);
Chris Masonf1885912008-04-09 16:28:12 -04002532 __extent_read_full_page(tree, page, get_extent,
Chris Masonc8b97812008-10-29 14:49:59 -04002533 &bio, 0, &bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002534 }
2535 page_cache_release(page);
2536 }
2537 if (pagevec_count(&pvec))
2538 __pagevec_lru_add(&pvec);
2539 BUG_ON(!list_empty(pages));
2540 if (bio)
Chris Masonc8b97812008-10-29 14:49:59 -04002541 submit_one_bio(READ, bio, 0, bio_flags);
Chris Masond1310b22008-01-24 16:13:08 -05002542 return 0;
2543}
2544EXPORT_SYMBOL(extent_readpages);
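
/*
 * example usage (a sketch with a made-up name): readahead is wired up
 * the same way through ->readpages:
 *
 *	static int example_readpages(struct file *file,
 *				     struct address_space *mapping,
 *				     struct list_head *pages,
 *				     unsigned nr_pages)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(mapping->host)->io_tree;
 *		return extent_readpages(tree, mapping, pages, nr_pages,
 *					btrfs_get_extent);
 *	}
 */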
2545
2546/*
2547 * basic invalidatepage code: this waits on any locked or writeback
2548 * ranges corresponding to the page, and then deletes any extent state
2549 * records from the tree
2550 */
2551int extent_invalidatepage(struct extent_io_tree *tree,
2552 struct page *page, unsigned long offset)
2553{
2554 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2555 u64 end = start + PAGE_CACHE_SIZE - 1;
2556 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2557
2558 start += (offset + blocksize -1) & ~(blocksize - 1);
2559 if (start > end)
2560 return 0;
2561
2562 lock_extent(tree, start, end, GFP_NOFS);
2563 wait_on_extent_writeback(tree, start, end);
2564 clear_extent_bit(tree, start, end,
2565 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2566 1, 1, GFP_NOFS);
2567 return 0;
2568}
2569EXPORT_SYMBOL(extent_invalidatepage);
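
/*
 * example usage (a sketch with a made-up name): a minimal
 * ->invalidatepage could simply forward to extent_invalidatepage.
 * btrfs's own hook does more work (ordered extents, dropping the
 * page's private state), so treat this only as an illustration of
 * the call:
 *
 *	static void example_invalidatepage(struct page *page,
 *					   unsigned long offset)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		extent_invalidatepage(tree, page, offset);
 *	}
 */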
2570
2571/*
2572 * simple commit_write call: the page is marked dirty and i_size is
2573 * extended if the write went past the current end of file
2574 */
2575int extent_commit_write(struct extent_io_tree *tree,
2576 struct inode *inode, struct page *page,
2577 unsigned from, unsigned to)
2578{
2579 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2580
2581 set_page_extent_mapped(page);
2582 set_page_dirty(page);
2583
2584 if (pos > inode->i_size) {
2585 i_size_write(inode, pos);
2586 mark_inode_dirty(inode);
2587 }
2588 return 0;
2589}
2590EXPORT_SYMBOL(extent_commit_write);
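
/*
 * example usage (a sketch with a made-up name): for the older
 * prepare_write/commit_write style of address_space ops, ->commit_write
 * can forward straight to extent_commit_write:
 *
 *	static int example_commit_write(struct file *file, struct page *page,
 *					unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return extent_commit_write(&BTRFS_I(inode)->io_tree,
 *					   inode, page, from, to);
 *	}
 */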
2591
2592int extent_prepare_write(struct extent_io_tree *tree,
2593 struct inode *inode, struct page *page,
2594 unsigned from, unsigned to, get_extent_t *get_extent)
2595{
2596 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2597 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2598 u64 block_start;
2599 u64 orig_block_start;
2600 u64 block_end;
2601 u64 cur_end;
2602 struct extent_map *em;
2603 unsigned blocksize = 1 << inode->i_blkbits;
2604 size_t page_offset = 0;
2605 size_t block_off_start;
2606 size_t block_off_end;
2607 int err = 0;
2608 int iocount = 0;
2609 int ret = 0;
2610 int isnew;
2611
2612 set_page_extent_mapped(page);
2613
2614 block_start = (page_start + from) & ~((u64)blocksize - 1);
2615 block_end = (page_start + to - 1) | (blocksize - 1);
2616 orig_block_start = block_start;
2617
2618 lock_extent(tree, page_start, page_end, GFP_NOFS);
2619 while(block_start <= block_end) {
2620 em = get_extent(inode, page, page_offset, block_start,
2621 block_end - block_start + 1, 1);
2622 if (IS_ERR(em) || !em) {
2623 goto err;
2624 }
2625 cur_end = min(block_end, extent_map_end(em) - 1);
2626 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2627 block_off_end = block_off_start + blocksize;
2628 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2629
2630 if (!PageUptodate(page) && isnew &&
2631 (block_off_end > to || block_off_start < from)) {
2632 void *kaddr;
2633
2634 kaddr = kmap_atomic(page, KM_USER0);
2635 if (block_off_end > to)
2636 memset(kaddr + to, 0, block_off_end - to);
2637 if (block_off_start < from)
2638 memset(kaddr + block_off_start, 0,
2639 from - block_off_start);
2640 flush_dcache_page(page);
2641 kunmap_atomic(kaddr, KM_USER0);
2642 }
2643 if ((em->block_start != EXTENT_MAP_HOLE &&
2644 em->block_start != EXTENT_MAP_INLINE) &&
2645 !isnew && !PageUptodate(page) &&
2646 (block_off_end > to || block_off_start < from) &&
2647 !test_range_bit(tree, block_start, cur_end,
2648 EXTENT_UPTODATE, 1)) {
2649 u64 sector;
2650 u64 extent_offset = block_start - em->start;
2651 size_t iosize;
2652 sector = (em->block_start + extent_offset) >> 9;
2653 iosize = (cur_end - block_start + blocksize) &
2654 ~((u64)blocksize - 1);
2655 /*
2656 * we've already got the extent locked, but we
2657 * need to split the state such that our end_bio
2658 * handler can clear the lock.
2659 */
2660 set_extent_bit(tree, block_start,
2661 block_start + iosize - 1,
2662 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2663 ret = submit_extent_page(READ, tree, page,
2664 sector, iosize, page_offset, em->bdev,
2665 NULL, 1,
Chris Masonc8b97812008-10-29 14:49:59 -04002666 end_bio_extent_preparewrite, 0,
2667 0, 0);
Chris Masond1310b22008-01-24 16:13:08 -05002668 iocount++;
2669 block_start = block_start + iosize;
2670 } else {
2671 set_extent_uptodate(tree, block_start, cur_end,
2672 GFP_NOFS);
2673 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2674 block_start = cur_end + 1;
2675 }
2676 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2677 free_extent_map(em);
2678 }
2679 if (iocount) {
2680 wait_extent_bit(tree, orig_block_start,
2681 block_end, EXTENT_LOCKED);
2682 }
2683 check_page_uptodate(tree, page);
2684err:
2685 /* FIXME, zero out newly allocated blocks on error */
2686 return err;
2687}
2688EXPORT_SYMBOL(extent_prepare_write);
2689
2690/*
Chris Mason7b13b7b2008-04-18 10:29:50 -04002691 * a helper for releasepage: this tests for areas of the page that
2692 * are locked or under IO and drops the related state bits if it is safe
2693 * to drop the page.
2694 */
2695int try_release_extent_state(struct extent_map_tree *map,
2696 struct extent_io_tree *tree, struct page *page,
2697 gfp_t mask)
2698{
2699 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2700 u64 end = start + PAGE_CACHE_SIZE - 1;
2701 int ret = 1;
2702
Chris Mason211f90e2008-07-18 11:56:15 -04002703 if (test_range_bit(tree, start, end,
2704 EXTENT_IOBITS | EXTENT_ORDERED, 0))
Chris Mason7b13b7b2008-04-18 10:29:50 -04002705 ret = 0;
2706 else {
2707 if ((mask & GFP_NOFS) == GFP_NOFS)
2708 mask = GFP_NOFS;
2709 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2710 1, 1, mask);
2711 }
2712 return ret;
2713}
2714EXPORT_SYMBOL(try_release_extent_state);
2715
2716/*
Chris Masond1310b22008-01-24 16:13:08 -05002717 * a helper for releasepage. As long as there are no locked extents
2718 * in the range corresponding to the page, both state records and extent
2719 * map records are removed
2720 */
2721int try_release_extent_mapping(struct extent_map_tree *map,
Chris Mason70dec802008-01-29 09:59:12 -05002722 struct extent_io_tree *tree, struct page *page,
2723 gfp_t mask)
Chris Masond1310b22008-01-24 16:13:08 -05002724{
2725 struct extent_map *em;
2726 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2727 u64 end = start + PAGE_CACHE_SIZE - 1;
Chris Mason7b13b7b2008-04-18 10:29:50 -04002728
Chris Mason70dec802008-01-29 09:59:12 -05002729 if ((mask & __GFP_WAIT) &&
2730 page->mapping->host->i_size > 16 * 1024 * 1024) {
Yan39b56372008-02-15 10:40:50 -05002731 u64 len;
Chris Mason70dec802008-01-29 09:59:12 -05002732 while (start <= end) {
Yan39b56372008-02-15 10:40:50 -05002733 len = end - start + 1;
Chris Mason70dec802008-01-29 09:59:12 -05002734 spin_lock(&map->lock);
Yan39b56372008-02-15 10:40:50 -05002735 em = lookup_extent_mapping(map, start, len);
Chris Mason70dec802008-01-29 09:59:12 -05002736 if (!em || IS_ERR(em)) {
2737 spin_unlock(&map->lock);
2738 break;
2739 }
Chris Mason7f3c74f2008-07-18 12:01:11 -04002740 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2741 em->start != start) {
Chris Mason70dec802008-01-29 09:59:12 -05002742 spin_unlock(&map->lock);
2743 free_extent_map(em);
2744 break;
2745 }
2746 if (!test_range_bit(tree, em->start,
2747 extent_map_end(em) - 1,
Chris Masonc8b97812008-10-29 14:49:59 -04002748 EXTENT_LOCKED | EXTENT_WRITEBACK |
2749 EXTENT_ORDERED,
2750 0)) {
Chris Mason70dec802008-01-29 09:59:12 -05002751 remove_extent_mapping(map, em);
2752 /* once for the rb tree */
2753 free_extent_map(em);
2754 }
2755 start = extent_map_end(em);
Chris Masond1310b22008-01-24 16:13:08 -05002756 spin_unlock(&map->lock);
Chris Mason70dec802008-01-29 09:59:12 -05002757
2758 /* once for us */
Chris Masond1310b22008-01-24 16:13:08 -05002759 free_extent_map(em);
2760 }
Chris Masond1310b22008-01-24 16:13:08 -05002761 }
Chris Mason7b13b7b2008-04-18 10:29:50 -04002762 return try_release_extent_state(map, tree, page, mask);
Chris Masond1310b22008-01-24 16:13:08 -05002763}
2764EXPORT_SYMBOL(try_release_extent_mapping);
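
/*
 * example usage (a sketch with a made-up name, modeled on what btrfs
 * does in its ->releasepage): if try_release_extent_mapping says the
 * page is free of interesting state, the caller drops the private
 * reference that set_page_extent_mapped took:
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp_flags)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		int ret;
 *
 *		ret = try_release_extent_mapping(&BTRFS_I(inode)->extent_tree,
 *						 &BTRFS_I(inode)->io_tree,
 *						 page, gfp_flags);
 *		if (ret == 1) {
 *			ClearPagePrivate(page);
 *			set_page_private(page, 0);
 *			page_cache_release(page);
 *		}
 *		return ret;
 *	}
 */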
2765
2766sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2767 get_extent_t *get_extent)
2768{
2769 struct inode *inode = mapping->host;
2770 u64 start = iblock << inode->i_blkbits;
2771 sector_t sector = 0;
2772 struct extent_map *em;
2773
2774 em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
2775 if (!em || IS_ERR(em))
2776 return 0;
2777
2778 if (em->block_start == EXTENT_MAP_INLINE ||
2779 em->block_start == EXTENT_MAP_HOLE)
2780 goto out;
2781
2782 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
Chris Masond1310b22008-01-24 16:13:08 -05002783out:
2784 free_extent_map(em);
2785 return sector;
2786}
2787
Chris Masond1310b22008-01-24 16:13:08 -05002788static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2789 unsigned long i)
2790{
2791 struct page *p;
2792 struct address_space *mapping;
2793
2794 if (i == 0)
2795 return eb->first_page;
2796 i += eb->start >> PAGE_CACHE_SHIFT;
2797 mapping = eb->first_page->mapping;
Chris Mason33958dc2008-07-30 10:29:12 -04002798 if (!mapping)
2799 return NULL;
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002800
2801 /*
2802 * extent_buffer_page is only called after pinning the page
2803 * by increasing the reference count. So we know the page must
2804 * be in the radix tree.
2805 */
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002806 rcu_read_lock();
Chris Masond1310b22008-01-24 16:13:08 -05002807 p = radix_tree_lookup(&mapping->page_tree, i);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04002808 rcu_read_unlock();
Chris Mason2b1f55b2008-09-24 11:48:04 -04002809
Chris Masond1310b22008-01-24 16:13:08 -05002810 return p;
2811}
2812
Chris Mason6af118ce2008-07-22 11:18:07 -04002813static inline unsigned long num_extent_pages(u64 start, u64 len)
Chris Masonce9adaa2008-04-09 16:28:12 -04002814{
Chris Mason6af118ce2008-07-22 11:18:07 -04002815 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2816 (start >> PAGE_CACHE_SHIFT);
Chris Mason728131d2008-04-09 16:28:12 -04002817}
2818
Chris Masond1310b22008-01-24 16:13:08 -05002819static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2820 u64 start,
2821 unsigned long len,
2822 gfp_t mask)
2823{
2824 struct extent_buffer *eb = NULL;
Chris Mason4bef0842008-09-08 11:18:08 -04002825#ifdef LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04002826 unsigned long flags;
Chris Mason4bef0842008-09-08 11:18:08 -04002827#endif
Chris Masond1310b22008-01-24 16:13:08 -05002828
Chris Masond1310b22008-01-24 16:13:08 -05002829 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 if (!eb)
 return NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002830 eb->start = start;
2831 eb->len = len;
Chris Masona61e6f22008-07-22 11:18:08 -04002832 mutex_init(&eb->mutex);
Chris Mason4bef0842008-09-08 11:18:08 -04002833#ifdef LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04002834 spin_lock_irqsave(&leak_lock, flags);
2835 list_add(&eb->leak_list, &buffers);
2836 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04002837#endif
Chris Masond1310b22008-01-24 16:13:08 -05002838 atomic_set(&eb->refs, 1);
2839
2840 return eb;
2841}
2842
2843static void __free_extent_buffer(struct extent_buffer *eb)
2844{
Chris Mason4bef0842008-09-08 11:18:08 -04002845#ifdef LEAK_DEBUG
Chris Mason2d2ae542008-03-26 16:24:23 -04002846 unsigned long flags;
2847 spin_lock_irqsave(&leak_lock, flags);
2848 list_del(&eb->leak_list);
2849 spin_unlock_irqrestore(&leak_lock, flags);
Chris Mason4bef0842008-09-08 11:18:08 -04002850#endif
Chris Masond1310b22008-01-24 16:13:08 -05002851 kmem_cache_free(extent_buffer_cache, eb);
2852}
2853
2854struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2855 u64 start, unsigned long len,
2856 struct page *page0,
2857 gfp_t mask)
2858{
2859 unsigned long num_pages = num_extent_pages(start, len);
2860 unsigned long i;
2861 unsigned long index = start >> PAGE_CACHE_SHIFT;
2862 struct extent_buffer *eb;
Chris Mason6af118ce2008-07-22 11:18:07 -04002863 struct extent_buffer *exists = NULL;
Chris Masond1310b22008-01-24 16:13:08 -05002864 struct page *p;
2865 struct address_space *mapping = tree->mapping;
2866 int uptodate = 1;
2867
Chris Mason6af118ce2008-07-22 11:18:07 -04002868 spin_lock(&tree->buffer_lock);
2869 eb = buffer_search(tree, start);
2870 if (eb) {
2871 atomic_inc(&eb->refs);
2872 spin_unlock(&tree->buffer_lock);
Josef Bacik0f9dd462008-09-23 13:14:11 -04002873 mark_page_accessed(eb->first_page);
Chris Mason6af118ce2008-07-22 11:18:07 -04002874 return eb;
2875 }
2876 spin_unlock(&tree->buffer_lock);
2877
Chris Masond1310b22008-01-24 16:13:08 -05002878 eb = __alloc_extent_buffer(tree, start, len, mask);
Peter2b114d12008-04-01 11:21:40 -04002879 if (!eb)
Chris Masond1310b22008-01-24 16:13:08 -05002880 return NULL;
2881
Chris Masond1310b22008-01-24 16:13:08 -05002882 if (page0) {
2883 eb->first_page = page0;
2884 i = 1;
2885 index++;
2886 page_cache_get(page0);
2887 mark_page_accessed(page0);
2888 set_page_extent_mapped(page0);
Chris Masond1310b22008-01-24 16:13:08 -05002889 set_page_extent_head(page0, len);
Chris Masonf1885912008-04-09 16:28:12 -04002890 uptodate = PageUptodate(page0);
Chris Masond1310b22008-01-24 16:13:08 -05002891 } else {
2892 i = 0;
2893 }
2894 for (; i < num_pages; i++, index++) {
2895 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2896 if (!p) {
2897 WARN_ON(1);
Chris Mason6af118ce2008-07-22 11:18:07 -04002898 goto free_eb;
Chris Masond1310b22008-01-24 16:13:08 -05002899 }
2900 set_page_extent_mapped(p);
2901 mark_page_accessed(p);
2902 if (i == 0) {
2903 eb->first_page = p;
2904 set_page_extent_head(p, len);
2905 } else {
2906 set_page_private(p, EXTENT_PAGE_PRIVATE);
2907 }
2908 if (!PageUptodate(p))
2909 uptodate = 0;
2910 unlock_page(p);
2911 }
2912 if (uptodate)
2913 eb->flags |= EXTENT_UPTODATE;
2914 eb->flags |= EXTENT_BUFFER_FILLED;
2915
Chris Mason6af118ce2008-07-22 11:18:07 -04002916 spin_lock(&tree->buffer_lock);
2917 exists = buffer_tree_insert(tree, start, &eb->rb_node);
2918 if (exists) {
2919 /* add one reference for the caller */
2920 atomic_inc(&exists->refs);
2921 spin_unlock(&tree->buffer_lock);
2922 goto free_eb;
2923 }
2924 spin_unlock(&tree->buffer_lock);
2925
2926 /* add one reference for the tree */
2927 atomic_inc(&eb->refs);
Chris Masond1310b22008-01-24 16:13:08 -05002928 return eb;
2929
Chris Mason6af118ce2008-07-22 11:18:07 -04002930free_eb:
Chris Masond1310b22008-01-24 16:13:08 -05002931 if (!atomic_dec_and_test(&eb->refs))
Chris Mason6af118ce2008-07-22 11:18:07 -04002932 return exists;
2933 for (index = 1; index < i; index++)
Chris Masond1310b22008-01-24 16:13:08 -05002934 page_cache_release(extent_buffer_page(eb, index));
Chris Mason6af118ce2008-07-22 11:18:07 -04002935 page_cache_release(extent_buffer_page(eb, 0));
Chris Masond1310b22008-01-24 16:13:08 -05002936 __free_extent_buffer(eb);
Chris Mason6af118ce2008-07-22 11:18:07 -04002937 return exists;
Chris Masond1310b22008-01-24 16:13:08 -05002938}
2939EXPORT_SYMBOL(alloc_extent_buffer);
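
/*
 * example usage (a sketch; btree_inode, bytenr and blocksize are
 * assumed to come from the caller): this is roughly how the btrfs
 * btree code pins a metadata block and reads it in with its
 * btree_get_extent callback:
 *
 *	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
 *				 bytenr, blocksize, NULL, GFP_NOFS);
 *	if (eb)
 *		ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
 *					       eb, 0, 1, btree_get_extent, 0);
 */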
2940
2941struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
2942 u64 start, unsigned long len,
2943 gfp_t mask)
2944{
Chris Masond1310b22008-01-24 16:13:08 -05002945 struct extent_buffer *eb;
Chris Masond1310b22008-01-24 16:13:08 -05002946
Chris Mason6af118ce2008-07-22 11:18:07 -04002947 spin_lock(&tree->buffer_lock);
2948 eb = buffer_search(tree, start);
2949 if (eb)
2950 atomic_inc(&eb->refs);
2951 spin_unlock(&tree->buffer_lock);
Chris Masond1310b22008-01-24 16:13:08 -05002952
Josef Bacik0f9dd462008-09-23 13:14:11 -04002953 if (eb)
2954 mark_page_accessed(eb->first_page);
2955
Chris Masond1310b22008-01-24 16:13:08 -05002956 return eb;
Chris Masond1310b22008-01-24 16:13:08 -05002957}
2958EXPORT_SYMBOL(find_extent_buffer);
2959
2960void free_extent_buffer(struct extent_buffer *eb)
2961{
Chris Masond1310b22008-01-24 16:13:08 -05002962 if (!eb)
2963 return;
2964
2965 if (!atomic_dec_and_test(&eb->refs))
2966 return;
2967
Chris Mason6af118ce2008-07-22 11:18:07 -04002968 WARN_ON(1);
Chris Masond1310b22008-01-24 16:13:08 -05002969}
2970EXPORT_SYMBOL(free_extent_buffer);
2971
2972int clear_extent_buffer_dirty(struct extent_io_tree *tree,
2973 struct extent_buffer *eb)
2974{
2975 int set;
2976 unsigned long i;
2977 unsigned long num_pages;
2978 struct page *page;
2979
2980 u64 start = eb->start;
2981 u64 end = start + eb->len - 1;
2982
2983 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2984 num_pages = num_extent_pages(eb->start, eb->len);
2985
2986 for (i = 0; i < num_pages; i++) {
2987 page = extent_buffer_page(eb, i);
Chris Masona61e6f22008-07-22 11:18:08 -04002988 lock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05002989 if (i == 0)
2990 set_page_extent_head(page, eb->len);
2991 else
2992 set_page_private(page, EXTENT_PAGE_PRIVATE);
2993
2994 /*
2995 * if we're on the last page or the first page and the
2996 * block isn't aligned on a page boundary, do extra checks
2997 * to make sure we don't clean a page that is partially dirty
2998 */
2999 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3000 ((i == num_pages - 1) &&
3001 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3002 start = (u64)page->index << PAGE_CACHE_SHIFT;
3003 end = start + PAGE_CACHE_SIZE - 1;
3004 if (test_range_bit(tree, start, end,
3005 EXTENT_DIRTY, 0)) {
Chris Masona61e6f22008-07-22 11:18:08 -04003006 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05003007 continue;
3008 }
3009 }
3010 clear_page_dirty_for_io(page);
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003011 spin_lock_irq(&page->mapping->tree_lock);
Chris Masond1310b22008-01-24 16:13:08 -05003012 if (!PageDirty(page)) {
3013 radix_tree_tag_clear(&page->mapping->page_tree,
3014 page_index(page),
3015 PAGECACHE_TAG_DIRTY);
3016 }
Sven Wegener0ee0fda2008-07-30 16:54:26 -04003017 spin_unlock_irq(&page->mapping->tree_lock);
Chris Masona61e6f22008-07-22 11:18:08 -04003018 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05003019 }
3020 return 0;
3021}
3022EXPORT_SYMBOL(clear_extent_buffer_dirty);
3023
3024int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3025 struct extent_buffer *eb)
3026{
3027 return wait_on_extent_writeback(tree, eb->start,
3028 eb->start + eb->len - 1);
3029}
3030EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
3031
3032int set_extent_buffer_dirty(struct extent_io_tree *tree,
3033 struct extent_buffer *eb)
3034{
3035 unsigned long i;
3036 unsigned long num_pages;
3037
3038 num_pages = num_extent_pages(eb->start, eb->len);
3039 for (i = 0; i < num_pages; i++) {
3040 struct page *page = extent_buffer_page(eb, i);
3041 /* writepage may need to do something special for the
3042 * first page, so we have to make sure page->private is
3043 * properly set. releasepage may drop page->private
3044 * on us if the page isn't already dirty.
3045 */
Chris Masona1b32a52008-09-05 16:09:51 -04003046 lock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05003047 if (i == 0) {
Chris Masond1310b22008-01-24 16:13:08 -05003048 set_page_extent_head(page, eb->len);
3049 } else if (PagePrivate(page) &&
3050 page->private != EXTENT_PAGE_PRIVATE) {
Chris Masond1310b22008-01-24 16:13:08 -05003051 set_page_extent_mapped(page);
Chris Masond1310b22008-01-24 16:13:08 -05003052 }
3053 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
Chris Masona1b32a52008-09-05 16:09:51 -04003054 set_extent_dirty(tree, page_offset(page),
3055 page_offset(page) + PAGE_CACHE_SIZE -1,
3056 GFP_NOFS);
3057 unlock_page(page);
Chris Masond1310b22008-01-24 16:13:08 -05003058 }
Chris Masona1b32a52008-09-05 16:09:51 -04003059 return 0;
Chris Masond1310b22008-01-24 16:13:08 -05003060}
3061EXPORT_SYMBOL(set_extent_buffer_dirty);
3062
Chris Mason1259ab72008-05-12 13:39:03 -04003063int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3064 struct extent_buffer *eb)
3065{
3066 unsigned long i;
3067 struct page *page;
3068 unsigned long num_pages;
3069
3070 num_pages = num_extent_pages(eb->start, eb->len);
3071 eb->flags &= ~EXTENT_UPTODATE;
3072
3073 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3074 GFP_NOFS);
3075 for (i = 0; i < num_pages; i++) {
3076 page = extent_buffer_page(eb, i);
Chris Mason33958dc2008-07-30 10:29:12 -04003077 if (page)
3078 ClearPageUptodate(page);
Chris Mason1259ab72008-05-12 13:39:03 -04003079 }
3080 return 0;
3081}
3082
Chris Masond1310b22008-01-24 16:13:08 -05003083int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3084 struct extent_buffer *eb)
3085{
3086 unsigned long i;
3087 struct page *page;
3088 unsigned long num_pages;
3089
3090 num_pages = num_extent_pages(eb->start, eb->len);
3091
3092 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3093 GFP_NOFS);
3094 for (i = 0; i < num_pages; i++) {
3095 page = extent_buffer_page(eb, i);
3096 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3097 ((i == num_pages - 1) &&
3098 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3099 check_page_uptodate(tree, page);
3100 continue;
3101 }
3102 SetPageUptodate(page);
3103 }
3104 return 0;
3105}
3106EXPORT_SYMBOL(set_extent_buffer_uptodate);
3107
Chris Masonce9adaa2008-04-09 16:28:12 -04003108int extent_range_uptodate(struct extent_io_tree *tree,
3109 u64 start, u64 end)
3110{
3111 struct page *page;
3112 int ret;
3113 int pg_uptodate = 1;
3114 int uptodate;
3115 unsigned long index;
3116
3117 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3118 if (ret)
3119 return 1;
3120 while(start <= end) {
3121 index = start >> PAGE_CACHE_SHIFT;
3122 page = find_get_page(tree->mapping, index);
 if (!page) {
 pg_uptodate = 0;
 break;
 }
3123 uptodate = PageUptodate(page);
3124 page_cache_release(page);
3125 if (!uptodate) {
3126 pg_uptodate = 0;
3127 break;
3128 }
3129 start += PAGE_CACHE_SIZE;
3130 }
3131 return pg_uptodate;
3132}
3133
Chris Masond1310b22008-01-24 16:13:08 -05003134int extent_buffer_uptodate(struct extent_io_tree *tree,
Chris Masonce9adaa2008-04-09 16:28:12 -04003135 struct extent_buffer *eb)
Chris Masond1310b22008-01-24 16:13:08 -05003136{
Chris Mason728131d2008-04-09 16:28:12 -04003137 int ret = 0;
Chris Masonce9adaa2008-04-09 16:28:12 -04003138 unsigned long num_pages;
3139 unsigned long i;
Chris Mason728131d2008-04-09 16:28:12 -04003140 struct page *page;
3141 int pg_uptodate = 1;
3142
Chris Masond1310b22008-01-24 16:13:08 -05003143 if (eb->flags & EXTENT_UPTODATE)
Chris Mason42352982008-04-28 16:40:52 -04003144 return 1;
Chris Mason728131d2008-04-09 16:28:12 -04003145
Chris Mason42352982008-04-28 16:40:52 -04003146 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
Chris Masond1310b22008-01-24 16:13:08 -05003147 EXTENT_UPTODATE, 1);
Chris Mason42352982008-04-28 16:40:52 -04003148 if (ret)
3149 return ret;
Chris Mason728131d2008-04-09 16:28:12 -04003150
3151 num_pages = num_extent_pages(eb->start, eb->len);
3152 for (i = 0; i < num_pages; i++) {
3153 page = extent_buffer_page(eb, i);
3154 if (!PageUptodate(page)) {
3155 pg_uptodate = 0;
3156 break;
3157 }
3158 }
Chris Mason42352982008-04-28 16:40:52 -04003159 return pg_uptodate;
Chris Masond1310b22008-01-24 16:13:08 -05003160}
3161EXPORT_SYMBOL(extent_buffer_uptodate);
3162
int read_extent_buffer_pages(struct extent_io_tree *tree,
                             struct extent_buffer *eb,
                             u64 start, int wait,
                             get_extent_t *get_extent, int mirror_num)
{
        unsigned long i;
        unsigned long start_i;
        struct page *page;
        int err;
        int ret = 0;
        int locked_pages = 0;
        int all_uptodate = 1;
        int inc_all_pages = 0;
        unsigned long num_pages;
        struct bio *bio = NULL;
        unsigned long bio_flags = 0;

        if (eb->flags & EXTENT_UPTODATE)
                return 0;

        if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
                           EXTENT_UPTODATE, 1)) {
                return 0;
        }

        if (start) {
                WARN_ON(start < eb->start);
                start_i = (start >> PAGE_CACHE_SHIFT) -
                          (eb->start >> PAGE_CACHE_SHIFT);
        } else {
                start_i = 0;
        }

        num_pages = num_extent_pages(eb->start, eb->len);
        for (i = start_i; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                if (!wait) {
                        if (!trylock_page(page))
                                goto unlock_exit;
                } else {
                        lock_page(page);
                }
                locked_pages++;
                if (!PageUptodate(page)) {
                        all_uptodate = 0;
                }
        }
        if (all_uptodate) {
                if (start_i == 0)
                        eb->flags |= EXTENT_UPTODATE;
                if (ret) {
                        printk("all up to date but ret is %d\n", ret);
                }
                goto unlock_exit;
        }

        for (i = start_i; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                if (inc_all_pages)
                        page_cache_get(page);
                if (!PageUptodate(page)) {
                        if (start_i == 0)
                                inc_all_pages = 1;
                        ClearPageError(page);
                        err = __extent_read_full_page(tree, page,
                                                      get_extent, &bio,
                                                      mirror_num, &bio_flags);
                        if (err) {
                                ret = err;
                                printk("err %d from __extent_read_full_page\n", ret);
                        }
                } else {
                        unlock_page(page);
                }
        }

        if (bio)
                submit_one_bio(READ, bio, mirror_num, bio_flags);

        if (ret || !wait) {
                if (ret)
                        printk("ret %d wait %d returning\n", ret, wait);
                return ret;
        }
        for (i = start_i; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                wait_on_page_locked(page);
                if (!PageUptodate(page)) {
                        printk("page not uptodate after wait_on_page_locked\n");
                        ret = -EIO;
                }
        }
        if (!ret)
                eb->flags |= EXTENT_UPTODATE;
        return ret;

unlock_exit:
        i = start_i;
        while (locked_pages > 0) {
                page = extent_buffer_page(eb, i);
                i++;
                unlock_page(page);
                locked_pages--;
        }
        return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);

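/*
 * copy len bytes starting at offset start out of an extent buffer into
 * dstv.  The buffer may be backed by several pages and may start at a
 * sub-page offset, so the copy is done one page chunk at a time through
 * kmap_atomic().
 *
 * As an illustrative example (assuming 4K pages): for a buffer with
 * eb->start = 0x10800, start_offset is 0x800, so a read at start =
 * 0x900 lands in page i = (0x800 + 0x900) >> 12 = 1 at in-page offset
 * (0x800 + 0x900) & 0xfff = 0x100.
 */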
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
                        unsigned long start,
                        unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        char *dst = (char *)dstv;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);

                cur = min(len, (PAGE_CACHE_SIZE - offset));
                kaddr = kmap_atomic(page, KM_USER1);
                memcpy(dst, kaddr + offset, cur);
                kunmap_atomic(kaddr, KM_USER1);

                dst += cur;
                len -= cur;
                offset = 0;
                i++;
        }
}
EXPORT_SYMBOL(read_extent_buffer);

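/*
 * map the page that holds bytes [start, start + min_len) of an extent
 * buffer for direct access.  If the range crosses a page boundary the
 * call fails with -EINVAL and the caller can fall back to copying
 * through read_extent_buffer()/write_extent_buffer().  On success
 * *token holds the kmap_atomic() cookie for unmap_extent_buffer(),
 * *map points at the buffer data at buffer offset *map_start, and
 * *map_len bytes are addressable from *map, so byte start is found at
 * *map + (start - *map_start).
 *
 * Example (assuming 4K pages and a page aligned buffer): asking for 8
 * bytes at start 0xffc gives i = 0 but end_i = 1, so -EINVAL is
 * returned.
 */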
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                              unsigned long min_len, char **token, char **map,
                              unsigned long *map_start,
                              unsigned long *map_len, int km)
{
        size_t offset = start & (PAGE_CACHE_SIZE - 1);
        char *kaddr;
        struct page *p;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
        unsigned long end_i = (start_offset + start + min_len - 1) >>
                              PAGE_CACHE_SHIFT;

        if (i != end_i)
                return -EINVAL;

        if (i == 0) {
                offset = start_offset;
                *map_start = 0;
        } else {
                offset = 0;
                *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
        }
        if (start + min_len > eb->len) {
                printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
                       eb->start, eb->len, start, min_len);
                WARN_ON(1);
        }

        p = extent_buffer_page(eb, i);
        kaddr = kmap_atomic(p, km);
        *token = kaddr;
        *map = kaddr + offset;
        *map_len = PAGE_CACHE_SIZE - offset;
        return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);

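/*
 * same as map_private_extent_buffer() but also maintains the cached
 * per-buffer mapping (eb->map_token, eb->kaddr, eb->map_start,
 * eb->map_len): an existing cached mapping is dropped first and, on
 * success, replaced by the new one.
 *
 * A hypothetical caller (variable names here are illustrative only)
 * might pair the map and unmap calls like this:
 *
 *        char *token, *kaddr;
 *        unsigned long map_start, map_len;
 *        int err;
 *
 *        err = map_extent_buffer(eb, offset, sizeof(u64), &token, &kaddr,
 *                                &map_start, &map_len, KM_USER1);
 *        if (!err) {
 *                val = le64_to_cpu(*(__le64 *)(kaddr + offset - map_start));
 *                unmap_extent_buffer(eb, token, KM_USER1);
 *        }
 *
 * and simply copy with read_extent_buffer() when err is -EINVAL.
 */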
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
                      unsigned long min_len,
                      char **token, char **map,
                      unsigned long *map_start,
                      unsigned long *map_len, int km)
{
        int err;
        int save = 0;

        if (eb->map_token) {
                unmap_extent_buffer(eb, eb->map_token, km);
                eb->map_token = NULL;
                save = 1;
        }
        err = map_private_extent_buffer(eb, start, min_len, token, map,
                                        map_start, map_len, km);
        if (!err && save) {
                eb->map_token = *token;
                eb->kaddr = *map;
                eb->map_start = *map_start;
                eb->map_len = *map_len;
        }
        return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
        kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
                         unsigned long start,
                         unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        char *ptr = (char *)ptrv;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
        int ret = 0;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);

                cur = min(len, (PAGE_CACHE_SIZE - offset));

                kaddr = kmap_atomic(page, KM_USER0);
                ret = memcmp(ptr, kaddr + offset, cur);
                kunmap_atomic(kaddr, KM_USER0);
                if (ret)
                        break;

                ptr += cur;
                len -= cur;
                offset = 0;
                i++;
        }
        return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);

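/*
 * copy len bytes from srcv into the extent buffer at offset start, one
 * page chunk at a time.  Each destination page is expected to already
 * be uptodate, hence the WARN_ON.
 */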
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
                         unsigned long start, unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        char *src = (char *)srcv;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, PAGE_CACHE_SIZE - offset);
                kaddr = kmap_atomic(page, KM_USER1);
                memcpy(kaddr + offset, src, cur);
                kunmap_atomic(kaddr, KM_USER1);

                src += cur;
                len -= cur;
                offset = 0;
                i++;
        }
}
EXPORT_SYMBOL(write_extent_buffer);

void memset_extent_buffer(struct extent_buffer *eb, char c,
                          unsigned long start, unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, PAGE_CACHE_SIZE - offset);
                kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + offset, c, cur);
                kunmap_atomic(kaddr, KM_USER0);

                len -= cur;
                offset = 0;
                i++;
        }
}
EXPORT_SYMBOL(memset_extent_buffer);

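/*
 * copy len bytes from offset src_offset in the src extent buffer to
 * offset dst_offset in dst.  The two buffers are expected to have the
 * same length (WARN_ON otherwise); the source side goes through
 * read_extent_buffer(), so the buffers may have different page
 * alignment.
 */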
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
                        unsigned long dst_offset, unsigned long src_offset,
                        unsigned long len)
{
        u64 dst_len = dst->len;
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

        WARN_ON(src->len != dst_len);

        offset = (start_offset + dst_offset) &
                 ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(dst, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

                kaddr = kmap_atomic(page, KM_USER0);
                read_extent_buffer(src, kaddr + offset, src_offset, cur);
                kunmap_atomic(kaddr, KM_USER0);

                src_offset += cur;
                len -= cur;
                offset = 0;
                i++;
        }
}
EXPORT_SYMBOL(copy_extent_buffer);

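/*
 * helper for memmove_extent_buffer() below: move len bytes between two
 * (possibly identical) pages.  When source and destination share a page
 * a plain memmove() handles any overlap; otherwise the bytes are copied
 * one at a time from the end of the range toward the beginning.
 */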
static void move_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
{
        char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);

        if (dst_page == src_page) {
                memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
        } else {
                char *src_kaddr = kmap_atomic(src_page, KM_USER1);
                char *p = dst_kaddr + dst_off + len;
                char *s = src_kaddr + src_off + len;

                while (len--)
                        *--p = *--s;

                kunmap_atomic(src_kaddr, KM_USER1);
        }
        kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
{
        char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
        char *src_kaddr;

        if (dst_page != src_page)
                src_kaddr = kmap_atomic(src_page, KM_USER1);
        else
                src_kaddr = dst_kaddr;

        memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
        kunmap_atomic(dst_kaddr, KM_USER0);
        if (dst_page != src_page)
                kunmap_atomic(src_kaddr, KM_USER1);
}

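/*
 * copy len bytes from src_offset to dst_offset inside the same extent
 * buffer, walking the range front to back.  Both ranges must fit inside
 * dst->len or the kernel BUGs.
 */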
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                          unsigned long src_offset, unsigned long len)
{
        size_t cur;
        size_t dst_off_in_page;
        size_t src_off_in_page;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;

        if (src_offset + len > dst->len) {
                printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
                       src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
                       dst_offset, len, dst->len);
                BUG_ON(1);
        }

        while (len > 0) {
                dst_off_in_page = (start_offset + dst_offset) &
                                  ((unsigned long)PAGE_CACHE_SIZE - 1);
                src_off_in_page = (start_offset + src_offset) &
                                  ((unsigned long)PAGE_CACHE_SIZE - 1);

                dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
                src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

                cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
                                               src_off_in_page));
                cur = min_t(unsigned long, cur,
                            (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

                copy_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
                           dst_off_in_page, src_off_in_page, cur);

                src_offset += cur;
                dst_offset += cur;
                len -= cur;
        }
}
EXPORT_SYMBOL(memcpy_extent_buffer);

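/*
 * memmove() equivalent for two ranges inside one extent buffer.  When
 * the destination starts below the source a forward
 * memcpy_extent_buffer() is safe; otherwise the ranges may overlap, so
 * the move is done from the last byte toward the first via move_pages().
 * For example, moving bytes 2-7 up to 4-9 front to back would clobber
 * bytes 4 and 5 before they were read, which is exactly what the
 * backwards walk avoids.
 */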
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                           unsigned long src_offset, unsigned long len)
{
        size_t cur;
        size_t dst_off_in_page;
        size_t src_off_in_page;
        unsigned long dst_end = dst_offset + len - 1;
        unsigned long src_end = src_offset + len - 1;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;

        if (src_offset + len > dst->len) {
                printk("memmove bogus src_offset %lu move len %lu len %lu\n",
                       src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
                       dst_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset < src_offset) {
                memcpy_extent_buffer(dst, dst_offset, src_offset, len);
                return;
        }
        while (len > 0) {
                dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
                src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

                dst_off_in_page = (start_offset + dst_end) &
                                  ((unsigned long)PAGE_CACHE_SIZE - 1);
                src_off_in_page = (start_offset + src_end) &
                                  ((unsigned long)PAGE_CACHE_SIZE - 1);

                cur = min_t(unsigned long, len, src_off_in_page + 1);
                cur = min(cur, dst_off_in_page + 1);
                move_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
                           dst_off_in_page - cur + 1,
                           src_off_in_page - cur + 1, cur);

                dst_end -= cur;
                src_end -= cur;
                len -= cur;
        }
}
EXPORT_SYMBOL(memmove_extent_buffer);

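/*
 * drop the extent buffer that backs this page if nothing else is using
 * it.  Returns 1 when the buffer could be freed (or no buffer exists
 * for the page), 0 when eb->refs shows it is still in use and the page
 * must stay around.
 */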
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
        u64 start = page_offset(page);
        struct extent_buffer *eb;
        int ret = 1;
        unsigned long i;
        unsigned long num_pages;

        spin_lock(&tree->buffer_lock);
        eb = buffer_search(tree, start);
        if (!eb)
                goto out;

        if (atomic_read(&eb->refs) > 1) {
                ret = 0;
                goto out;
        }
        /* at this point we can safely release the extent buffer */
        num_pages = num_extent_pages(eb->start, eb->len);
        for (i = 0; i < num_pages; i++)
                page_cache_release(extent_buffer_page(eb, i));
        rb_erase(&eb->rb_node, &tree->buffer);
        __free_extent_buffer(eb);
out:
        spin_unlock(&tree->buffer_lock);
        return ret;
}
EXPORT_SYMBOL(try_release_extent_buffer);