#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
}

void __exit extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state, state->in_tree,
		       atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);
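
/*
 * Illustrative sketch, not part of the original file: a filesystem would
 * typically embed one extent_map_tree per inode and initialize it against
 * that inode's address_space, roughly like this (my_inode_info and its
 * extent_tree field are hypothetical names):
 *
 *	struct extent_map_tree *tree = &my_inode_info->extent_tree;
 *
 *	extent_map_tree_init(tree, inode->i_mapping, GFP_NOFS);
 */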

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;
	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);


struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
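
/*
 * Illustrative sketch, not part of the original file: a caller that wants
 * to record and later re-find a mapping might do roughly the following
 * (the range and disk_block_start values are made up for the example):
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *
 *	em->start = 0;
 *	em->end = 4095;
 *	em->block_start = disk_block_start;
 *	em->block_end = disk_block_start + 4095;
 *	ret = add_extent_mapping(tree, em);	(tree takes its own reference)
 *	free_extent_map(em);			(drop the allocation reference)
 *
 *	em = lookup_extent_mapping(tree, 0, 4095);
 *	if (em)
 *		free_extent_map(em);		(drop the lookup reference)
 */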

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start, prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
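
/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * for the wrappers above is to lock a byte range, update its state bits,
 * and then unlock it, for example (the range values are made up):
 *
 *	lock_extent(tree, start, start + len - 1, GFP_NOFS);
 *	set_extent_uptodate(tree, start, start + len - 1, GFP_NOFS);
 *	unlock_extent(tree, start, start + len - 1, GFP_NOFS);
 */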

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes = state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
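
/*
 * Illustrative sketch, not part of the original file: the private value is
 * keyed by the exact start offset of an extent_state, so callers store and
 * fetch it with the same start (the csum value here is hypothetical):
 *
 *	set_state_private(tree, start, csum);
 *	...
 *	err = get_state_private(tree, start, &csum);
 *	(err is -ENOENT if no extent_state begins exactly at 'start')
 */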

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);
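
/*
 * Illustrative sketch, not part of the original file: 'filled' picks
 * between "every byte in the range has the bit" and "any byte has it":
 *
 *	whole page known up to date?
 *	test_range_bit(tree, page_start, page_end, EXTENT_UPTODATE, 1);
 *
 *	any part of the page still locked?
 *	test_range_bit(tree, page_start, page_end, EXTENT_LOCKED, 0);
 */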

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      bio_end_io_t end_io_func)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_len = size;
	bio->bi_io_vec[0].bv_offset = offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = size;

	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(extent_read_full_page);
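
/*
 * Illustrative sketch, not part of the original file: a filesystem using
 * this code would normally call extent_read_full_page() from its
 * address_space readpage method, passing its own get_extent callback.
 * The names below are hypothetical:
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree =
 *			&MYFS_I(page->mapping->host)->extent_tree;
 *		return extent_read_full_page(tree, page, myfs_get_extent);
 *	}
 */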

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	lock_extent(tree, start, page_end, GFP_NOFS);
	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
					       &delalloc_end,
					       128 * 1024 * 1024);
	if (nr_delalloc) {
		tree->ops->fill_delalloc(inode, start, delalloc_end);
		if (delalloc_end >= page_end + 1) {
			clear_extent_bit(tree, page_end + 1, delalloc_end,
					 EXTENT_LOCKED | EXTENT_DELALLOC,
					 1, 0, GFP_NOFS);
		}
		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
				 0, 0, GFP_NOFS);
		if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
			printk("found delalloc bits after clear extent_bit\n");
		}
	} else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after find_delalloc_range returns 0\n");
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			set_range_writeback(tree, cur, cur + iosize - 1);
			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}
EXPORT_SYMBOL(extent_write_full_page);
1747
1748/*
1749 * basic invalidatepage code, this waits on any locked or writeback
1750 * ranges corresponding to the page, and then deletes any extent state
1751 * records from the tree
1752 */
1753int extent_invalidatepage(struct extent_map_tree *tree,
1754 struct page *page, unsigned long offset)
1755{
Chris Mason35ebb932007-10-30 16:56:53 -04001756 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
Chris Masona52d9a82007-08-27 16:49:44 -04001757 u64 end = start + PAGE_CACHE_SIZE - 1;
1758 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1759
1760 start += (offset + blocksize - 1) & ~(blocksize - 1);
1761 if (start > end)
1762 return 0;
1763
1764 lock_extent(tree, start, end, GFP_NOFS);
1765 wait_on_extent_writeback(tree, start, end);
Chris Mason2bf5a722007-08-30 11:54:02 -04001766 clear_extent_bit(tree, start, end,
1767 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
Chris Masona52d9a82007-08-27 16:49:44 -04001768 1, 1, GFP_NOFS);
1769 return 0;
1770}
1771EXPORT_SYMBOL(extent_invalidatepage);
1772
1773/*
1774 * simple commit_write call: marks the page dirty and, if the write
1775 * went past the old i_size, updates the in-memory inode size
1776 */
1777int extent_commit_write(struct extent_map_tree *tree,
1778 struct inode *inode, struct page *page,
1779 unsigned from, unsigned to)
1780{
1781 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1782
Christoph Hellwigb3cfa352007-09-17 11:25:58 -04001783 set_page_extent_mapped(page);
Chris Masona52d9a82007-08-27 16:49:44 -04001784 set_page_dirty(page);
1785
1786 if (pos > inode->i_size) {
1787 i_size_write(inode, pos);
1788 mark_inode_dirty(inode);
1789 }
1790 return 0;
1791}
1792EXPORT_SYMBOL(extent_commit_write);
1793
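/*
 * prepare_write helper.  Walks the blocks touched by a write from 'from'
 * to 'to', zeroing the unwritten parts of newly allocated blocks and
 * reading in any existing blocks that are not yet uptodate, so the caller
 * can safely copy in the new data afterwards
 */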
1794int extent_prepare_write(struct extent_map_tree *tree,
1795 struct inode *inode, struct page *page,
1796 unsigned from, unsigned to, get_extent_t *get_extent)
1797{
Chris Mason35ebb932007-10-30 16:56:53 -04001798 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
Chris Masona52d9a82007-08-27 16:49:44 -04001799 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1800 u64 block_start;
1801 u64 orig_block_start;
1802 u64 block_end;
1803 u64 cur_end;
1804 struct extent_map *em;
1805 unsigned blocksize = 1 << inode->i_blkbits;
1806 size_t page_offset = 0;
1807 size_t block_off_start;
1808 size_t block_off_end;
1809 int err = 0;
1810 int iocount = 0;
1811 int ret = 0;
1812 int isnew;
1813
Christoph Hellwigb3cfa352007-09-17 11:25:58 -04001814 set_page_extent_mapped(page);
1815
Chris Masona52d9a82007-08-27 16:49:44 -04001816 block_start = (page_start + from) & ~((u64)blocksize - 1);
1817 block_end = (page_start + to - 1) | (blocksize - 1);
1818 orig_block_start = block_start;
1819
1820 lock_extent(tree, page_start, page_end, GFP_NOFS);
1821 while(block_start <= block_end) {
1822 em = get_extent(inode, page, page_offset, block_start,
1823 block_end, 1);
1824 if (IS_ERR(em) || !em) {
1825 goto err;
1826 }
1827 cur_end = min(block_end, em->end);
1828 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1829 block_off_end = block_off_start + blocksize;
1830 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1831
1832 if (!PageUptodate(page) && isnew &&
1833 (block_off_end > to || block_off_start < from)) {
1834 void *kaddr;
1835
1836 kaddr = kmap_atomic(page, KM_USER0);
1837 if (block_off_end > to)
1838 memset(kaddr + to, 0, block_off_end - to);
1839 if (block_off_start < from)
1840 memset(kaddr + block_off_start, 0,
1841 from - block_off_start);
1842 flush_dcache_page(page);
1843 kunmap_atomic(kaddr, KM_USER0);
1844 }
1845 if (!isnew && !PageUptodate(page) &&
1846 (block_off_end > to || block_off_start < from) &&
1847 !test_range_bit(tree, block_start, cur_end,
1848 EXTENT_UPTODATE, 1)) {
1849 u64 sector;
1850 u64 extent_offset = block_start - em->start;
1851 size_t iosize;
1852 sector = (em->block_start + extent_offset) >> 9;
1853 iosize = (cur_end - block_start + blocksize - 1) &
1854 ~((u64)blocksize - 1);
1855 /*
1856 * we've already got the extent locked, but we
1857 * need to split the state such that our end_bio
1858 * handler can clear the lock.
1859 */
1860 set_extent_bit(tree, block_start,
1861 block_start + iosize - 1,
1862 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
1863 ret = submit_extent_page(READ, tree, page,
1864 sector, iosize, page_offset, em->bdev,
1865 end_bio_extent_preparewrite);
1866 iocount++;
1867 block_start = block_start + iosize;
1868 } else {
1869 set_extent_uptodate(tree, block_start, cur_end,
1870 GFP_NOFS);
1871 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
1872 block_start = cur_end + 1;
1873 }
1874 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
1875 free_extent_map(em);
1876 }
1877 if (iocount) {
1878 wait_extent_bit(tree, orig_block_start,
1879 block_end, EXTENT_LOCKED);
1880 }
1881 check_page_uptodate(tree, page);
1882err:
1883 /* FIXME, zero out newly allocated blocks on error */
1884 return err;
1885}
1886EXPORT_SYMBOL(extent_prepare_write);
1887
1888/*
1889 * a helper for releasepage. As long as there are no locked extents
1890 * in the range corresponding to the page, both state records and extent
1891 * map records are removed
1892 */
1893int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
1894{
1895 struct extent_map *em;
Chris Mason35ebb932007-10-30 16:56:53 -04001896 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
Chris Masona52d9a82007-08-27 16:49:44 -04001897 u64 end = start + PAGE_CACHE_SIZE - 1;
1898 u64 orig_start = start;
Chris Masonb888db22007-08-27 16:49:44 -04001899 int ret = 1;
Chris Masona52d9a82007-08-27 16:49:44 -04001900
1901 while (start <= end) {
1902 em = lookup_extent_mapping(tree, start, end);
1903 if (!em || IS_ERR(em))
1904 break;
Chris Masonb888db22007-08-27 16:49:44 -04001905 if (!test_range_bit(tree, em->start, em->end,
1906 EXTENT_LOCKED, 0)) {
1907 remove_extent_mapping(tree, em);
1908 /* once for the rb tree */
Chris Masona52d9a82007-08-27 16:49:44 -04001909 free_extent_map(em);
Chris Masona52d9a82007-08-27 16:49:44 -04001910 }
Chris Masona52d9a82007-08-27 16:49:44 -04001911 start = em->end + 1;
Chris Masona52d9a82007-08-27 16:49:44 -04001912 /* once for us */
1913 free_extent_map(em);
1914 }
Chris Masonb888db22007-08-27 16:49:44 -04001915 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
1916 ret = 0;
1917 else
1918 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
1919 1, 1, GFP_NOFS);
1920 return ret;
Chris Masona52d9a82007-08-27 16:49:44 -04001921}
1922EXPORT_SYMBOL(try_release_extent_mapping);
1923
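/*
 * ->bmap / FIBMAP helper: translate a file block number into an on-disk
 * sector.  Holes and inline extents have no sector, so 0 is returned for
 * them (and on lookup failure)
 */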
Christoph Hellwigd396c6f2007-09-10 20:02:30 -04001924sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
1925 get_extent_t *get_extent)
1926{
1927 struct inode *inode = mapping->host;
1928 u64 start = iblock << inode->i_blkbits;
1929 u64 end = start + (1 << inode->i_blkbits) - 1;
Yanc67cda12007-10-29 11:41:05 -04001930 sector_t sector = 0;
Christoph Hellwigd396c6f2007-09-10 20:02:30 -04001931 struct extent_map *em;
1932
1933 em = get_extent(inode, NULL, 0, start, end, 0);
1934 if (!em || IS_ERR(em))
1935 return 0;
1936
Christoph Hellwigd396c6f2007-09-10 20:02:30 -04001937 if (em->block_start == EXTENT_MAP_INLINE ||
Chris Mason5f39d392007-10-15 16:14:19 -04001938 em->block_start == EXTENT_MAP_HOLE)
Yanc67cda12007-10-29 11:41:05 -04001939 goto out;
Christoph Hellwigd396c6f2007-09-10 20:02:30 -04001940
Yanc67cda12007-10-29 11:41:05 -04001941 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
1942out:
1943 free_extent_map(em);
1944 return sector;
Christoph Hellwigd396c6f2007-09-10 20:02:30 -04001945}
Chris Mason5f39d392007-10-15 16:14:19 -04001946
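/*
 * put an extent buffer at the head of the per-tree lru.  The lru holds an
 * extra reference on at most BUFFER_LRU_MAX buffers; when it is full the
 * oldest entry is dropped and its reference released.  Caller must hold
 * tree->lru_lock
 */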
Chris Mason4dc11902007-10-15 16:18:14 -04001947static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
Chris Mason6d36dcd2007-10-15 16:14:37 -04001948{
Chris Mason4dc11902007-10-15 16:18:14 -04001949 if (list_empty(&eb->lru)) {
1950 extent_buffer_get(eb);
1951 list_add(&eb->lru, &tree->buffer_lru);
1952 tree->lru_size++;
1953 if (tree->lru_size >= BUFFER_LRU_MAX) {
1954 struct extent_buffer *rm;
1955 rm = list_entry(tree->buffer_lru.prev,
1956 struct extent_buffer, lru);
1957 tree->lru_size--;
1958 list_del(&rm->lru);
1959 free_extent_buffer(rm);
1960 }
1961 } else
1962 list_move(&eb->lru, &tree->buffer_lru);
1963 return 0;
Chris Mason6d36dcd2007-10-15 16:14:37 -04001964}
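
/*
 * linear scan of the lru for a buffer matching start and len.  Returns the
 * buffer with an extra reference held, or NULL if there is no match.
 * Caller must hold tree->lru_lock
 */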
Chris Mason4dc11902007-10-15 16:18:14 -04001965static struct extent_buffer *find_lru(struct extent_map_tree *tree,
1966 u64 start, unsigned long len)
Chris Mason6d36dcd2007-10-15 16:14:37 -04001967{
Chris Mason4dc11902007-10-15 16:18:14 -04001968 struct list_head *lru = &tree->buffer_lru;
1969 struct list_head *cur = lru->next;
1970 struct extent_buffer *eb;
Chris Masonf510cfe2007-10-15 16:14:48 -04001971
Chris Mason4dc11902007-10-15 16:18:14 -04001972 if (list_empty(lru))
1973 return NULL;
Chris Masonf510cfe2007-10-15 16:14:48 -04001974
Chris Mason4dc11902007-10-15 16:18:14 -04001975 do {
1976 eb = list_entry(cur, struct extent_buffer, lru);
1977 if (eb->start == start && eb->len == len) {
1978 extent_buffer_get(eb);
1979 return eb;
1980 }
1981 cur = cur->next;
1982 } while (cur != lru);
1983 return NULL;
Chris Mason6d36dcd2007-10-15 16:14:37 -04001984}
1985
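/* number of pages needed to cover the byte range [start, start + len) */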
Chris Masondb945352007-10-15 16:15:53 -04001986static inline unsigned long num_extent_pages(u64 start, u64 len)
1987{
1988 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
1989 (start >> PAGE_CACHE_SHIFT);
1990}
Chris Mason4dc11902007-10-15 16:18:14 -04001991
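/*
 * return the i'th page backing an extent buffer.  Page 0 is cached in
 * eb->first_page, the others are looked up in the radix tree of the
 * owning mapping
 */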
1992static inline struct page *extent_buffer_page(struct extent_buffer *eb,
1993 unsigned long i)
1994{
1995 struct page *p;
Chris Mason3685f792007-10-19 09:23:27 -04001996 struct address_space *mapping;
Chris Mason4dc11902007-10-15 16:18:14 -04001997
1998 if (i == 0)
Chris Mason810191f2007-10-15 16:18:55 -04001999 return eb->first_page;
Chris Mason4dc11902007-10-15 16:18:14 -04002000 i += eb->start >> PAGE_CACHE_SHIFT;
Chris Mason3685f792007-10-19 09:23:27 -04002001 mapping = eb->first_page->mapping;
2002 read_lock_irq(&mapping->tree_lock);
2003 p = radix_tree_lookup(&mapping->page_tree, i);
2004 read_unlock_irq(&mapping->tree_lock);
Chris Mason4dc11902007-10-15 16:18:14 -04002005 return p;
2006}
2007
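/*
 * find a cached extent_buffer for this range on the lru, or allocate a
 * fresh one from the slab.  Either way the buffer ends up at the head of
 * the lru; attaching the backing pages is left to the callers
 */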
2008static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2009 u64 start,
2010 unsigned long len,
2011 gfp_t mask)
2012{
2013 struct extent_buffer *eb = NULL;
2014
2015 spin_lock(&tree->lru_lock);
2016 eb = find_lru(tree, start, len);
Chris Mason19c00dd2007-10-15 16:19:22 -04002017 if (eb) {
Chris Mason4dc11902007-10-15 16:18:14 -04002018 goto lru_add;
Chris Mason19c00dd2007-10-15 16:19:22 -04002019 }
Chris Mason4dc11902007-10-15 16:18:14 -04002020 spin_unlock(&tree->lru_lock);
2021
	/* nothing matched on the lru above, so eb is always NULL here */
	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
2027 INIT_LIST_HEAD(&eb->lru);
2028 eb->start = start;
2029 eb->len = len;
2030 atomic_set(&eb->refs, 1);
2031
2032 spin_lock(&tree->lru_lock);
2033lru_add:
2034 add_lru(tree, eb);
2035 spin_unlock(&tree->lru_lock);
2036 return eb;
2037}
2038
2039static void __free_extent_buffer(struct extent_buffer *eb)
2040{
2041 kmem_cache_free(extent_buffer_cache, eb);
2042}
2043
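/*
 * return an extent buffer backed by pages in the page cache for the range
 * [start, start + len), creating the pages if needed.  page0, when passed
 * in, is used as the first page and saves one lookup.  Returns NULL on
 * failure
 */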
Chris Mason5f39d392007-10-15 16:14:19 -04002044struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2045 u64 start, unsigned long len,
Chris Mason19c00dd2007-10-15 16:19:22 -04002046 struct page *page0,
Chris Mason5f39d392007-10-15 16:14:19 -04002047 gfp_t mask)
2048{
Chris Masondb945352007-10-15 16:15:53 -04002049 unsigned long num_pages = num_extent_pages(start, len);
Chris Mason5f39d392007-10-15 16:14:19 -04002050 unsigned long i;
2051 unsigned long index = start >> PAGE_CACHE_SHIFT;
2052 struct extent_buffer *eb;
2053 struct page *p;
2054 struct address_space *mapping = tree->mapping;
Yan65555a02007-10-25 15:42:57 -04002055 int uptodate = 1;
Chris Mason5f39d392007-10-15 16:14:19 -04002056
Chris Mason4dc11902007-10-15 16:18:14 -04002057 eb = __alloc_extent_buffer(tree, start, len, mask);
Chris Mason5f39d392007-10-15 16:14:19 -04002058 if (!eb || IS_ERR(eb))
2059 return NULL;
2060
Chris Mason4dc11902007-10-15 16:18:14 -04002061 if (eb->flags & EXTENT_BUFFER_FILLED)
2062 return eb;
Chris Mason5f39d392007-10-15 16:14:19 -04002063
Chris Mason19c00dd2007-10-15 16:19:22 -04002064 if (page0) {
2065 eb->first_page = page0;
2066 i = 1;
2067 index++;
2068 page_cache_get(page0);
Chris Masonff79f812007-10-15 16:22:25 -04002069 mark_page_accessed(page0);
Chris Mason19c00dd2007-10-15 16:19:22 -04002070 set_page_extent_mapped(page0);
2071 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2072 len << 2);
2073 } else {
2074 i = 0;
2075 }
2076 for (; i < num_pages; i++, index++) {
Chris Mason5f39d392007-10-15 16:14:19 -04002077 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
Chris Mason6d36dcd2007-10-15 16:14:37 -04002078 if (!p) {
Chris Masondb945352007-10-15 16:15:53 -04002079 WARN_ON(1);
Chris Mason6d36dcd2007-10-15 16:14:37 -04002080 /* make sure the free only frees the pages we've
2081 * grabbed a reference on
2082 */
2083 eb->len = i << PAGE_CACHE_SHIFT;
2084 eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002085 goto fail;
Chris Mason6d36dcd2007-10-15 16:14:37 -04002086 }
Chris Masonf510cfe2007-10-15 16:14:48 -04002087 set_page_extent_mapped(p);
Chris Masonff79f812007-10-15 16:22:25 -04002088 mark_page_accessed(p);
Chris Mason19c00dd2007-10-15 16:19:22 -04002089 if (i == 0) {
Chris Mason810191f2007-10-15 16:18:55 -04002090 eb->first_page = p;
Chris Mason19c00dd2007-10-15 16:19:22 -04002091 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2092 len << 2);
2093 } else {
2094 set_page_private(p, EXTENT_PAGE_PRIVATE);
2095 }
Chris Mason5f39d392007-10-15 16:14:19 -04002096 if (!PageUptodate(p))
2097 uptodate = 0;
2098 unlock_page(p);
2099 }
2100 if (uptodate)
2101 eb->flags |= EXTENT_UPTODATE;
Chris Mason4dc11902007-10-15 16:18:14 -04002102 eb->flags |= EXTENT_BUFFER_FILLED;
Chris Mason5f39d392007-10-15 16:14:19 -04002103 return eb;
2104fail:
2105 free_extent_buffer(eb);
2106 return NULL;
2107}
2108EXPORT_SYMBOL(alloc_extent_buffer);
2109
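/*
 * like alloc_extent_buffer, but only uses pages that already exist in the
 * page cache (find_lock_page).  Returns NULL if any page in the range is
 * missing
 */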
2110struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2111 u64 start, unsigned long len,
2112 gfp_t mask)
2113{
Chris Masondb945352007-10-15 16:15:53 -04002114 unsigned long num_pages = num_extent_pages(start, len);
Chris Mason19c00dd2007-10-15 16:19:22 -04002115 unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
Chris Mason5f39d392007-10-15 16:14:19 -04002116 struct extent_buffer *eb;
2117 struct page *p;
2118 struct address_space *mapping = tree->mapping;
Chris Mason14048ed2007-10-15 16:16:28 -04002119 int uptodate = 1;
Chris Mason5f39d392007-10-15 16:14:19 -04002120
Chris Mason4dc11902007-10-15 16:18:14 -04002121 eb = __alloc_extent_buffer(tree, start, len, mask);
Chris Mason5f39d392007-10-15 16:14:19 -04002122 if (!eb || IS_ERR(eb))
2123 return NULL;
2124
Chris Mason4dc11902007-10-15 16:18:14 -04002125 if (eb->flags & EXTENT_BUFFER_FILLED)
2126 return eb;
Chris Mason5f39d392007-10-15 16:14:19 -04002127
2128 for (i = 0; i < num_pages; i++, index++) {
Chris Mason14048ed2007-10-15 16:16:28 -04002129 p = find_lock_page(mapping, index);
Chris Mason6d36dcd2007-10-15 16:14:37 -04002130 if (!p) {
2131 /* make sure the free only frees the pages we've
2132 * grabbed a reference on
2133 */
2134 eb->len = i << PAGE_CACHE_SHIFT;
2135 eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002136 goto fail;
Chris Mason6d36dcd2007-10-15 16:14:37 -04002137 }
Chris Masonf510cfe2007-10-15 16:14:48 -04002138 set_page_extent_mapped(p);
Chris Masonff79f812007-10-15 16:22:25 -04002139 mark_page_accessed(p);
Chris Mason19c00dd2007-10-15 16:19:22 -04002140
2141 if (i == 0) {
Chris Mason810191f2007-10-15 16:18:55 -04002142 eb->first_page = p;
Chris Mason19c00dd2007-10-15 16:19:22 -04002143 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2144 len << 2);
2145 } else {
2146 set_page_private(p, EXTENT_PAGE_PRIVATE);
2147 }
2148
Chris Mason14048ed2007-10-15 16:16:28 -04002149 if (!PageUptodate(p))
2150 uptodate = 0;
2151 unlock_page(p);
Chris Mason5f39d392007-10-15 16:14:19 -04002152 }
Chris Mason14048ed2007-10-15 16:16:28 -04002153 if (uptodate)
2154 eb->flags |= EXTENT_UPTODATE;
Chris Mason4dc11902007-10-15 16:18:14 -04002155 eb->flags |= EXTENT_BUFFER_FILLED;
Chris Mason5f39d392007-10-15 16:14:19 -04002156 return eb;
2157fail:
2158 free_extent_buffer(eb);
2159 return NULL;
2160}
2161EXPORT_SYMBOL(find_extent_buffer);
2162
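/*
 * drop a reference on an extent buffer.  When the last reference goes away
 * the page cache references taken at alloc/find time are released and the
 * struct goes back to the slab
 */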
2163void free_extent_buffer(struct extent_buffer *eb)
2164{
2165 unsigned long i;
2166 unsigned long num_pages;
2167
2168 if (!eb)
2169 return;
2170
2171 if (!atomic_dec_and_test(&eb->refs))
2172 return;
2173
Chris Masondb945352007-10-15 16:15:53 -04002174 num_pages = num_extent_pages(eb->start, eb->len);
Chris Mason5f39d392007-10-15 16:14:19 -04002175
Chris Mason09e71a32007-10-15 16:17:04 -04002176 for (i = 0; i < num_pages; i++) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002177 page_cache_release(extent_buffer_page(eb, i));
Chris Mason5f39d392007-10-15 16:14:19 -04002178 }
Chris Mason6d36dcd2007-10-15 16:14:37 -04002179 __free_extent_buffer(eb);
Chris Mason5f39d392007-10-15 16:14:19 -04002180}
2181EXPORT_SYMBOL(free_extent_buffer);
2182
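/*
 * clear the dirty bits for the buffer in the extent state tree and on the
 * backing pages.  A first or last page that is only partially covered by
 * the buffer is left dirty if some other range on it is still dirty
 */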
2183int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2184 struct extent_buffer *eb)
2185{
2186 int set;
2187 unsigned long i;
2188 unsigned long num_pages;
2189 struct page *page;
2190
2191 u64 start = eb->start;
2192 u64 end = start + eb->len - 1;
2193
2194 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
Chris Masondb945352007-10-15 16:15:53 -04002195 num_pages = num_extent_pages(eb->start, eb->len);
Chris Mason5f39d392007-10-15 16:14:19 -04002196
2197 for (i = 0; i < num_pages; i++) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002198 page = extent_buffer_page(eb, i);
Chris Mason5f39d392007-10-15 16:14:19 -04002199 lock_page(page);
2200 /*
2201 * if we're on the last page or the first page and the
2202 * block isn't aligned on a page boundary, do extra checks
2203 * to make sure we don't clean a page that is partially dirty
2204 */
2205 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2206 ((i == num_pages - 1) &&
Yan65555a02007-10-25 15:42:57 -04002207 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
Chris Mason35ebb932007-10-30 16:56:53 -04002208 start = (u64)page->index << PAGE_CACHE_SHIFT;
Chris Mason5f39d392007-10-15 16:14:19 -04002209 end = start + PAGE_CACHE_SIZE - 1;
2210 if (test_range_bit(tree, start, end,
2211 EXTENT_DIRTY, 0)) {
2212 unlock_page(page);
2213 continue;
2214 }
2215 }
2216 clear_page_dirty_for_io(page);
2217 unlock_page(page);
2218 }
2219 return 0;
2220}
2221EXPORT_SYMBOL(clear_extent_buffer_dirty);
2222
2223int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2224 struct extent_buffer *eb)
2225{
2226 return wait_on_extent_writeback(tree, eb->start,
2227 eb->start + eb->len - 1);
2228}
2229EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2230
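/*
 * dirty every page backing the buffer and then set EXTENT_DIRTY on the
 * byte range in the state tree.  page->private on the first page is
 * refreshed first so writepage can still find the buffer if releasepage
 * dropped it
 */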
2231int set_extent_buffer_dirty(struct extent_map_tree *tree,
2232 struct extent_buffer *eb)
2233{
Chris Mason810191f2007-10-15 16:18:55 -04002234 unsigned long i;
2235 unsigned long num_pages;
2236
2237 num_pages = num_extent_pages(eb->start, eb->len);
2238 for (i = 0; i < num_pages; i++) {
Chris Mason19c00dd2007-10-15 16:19:22 -04002239 struct page *page = extent_buffer_page(eb, i);
2240 /* writepage may need to do something special for the
2241 * first page, we have to make sure page->private is
2242 * properly set. releasepage may drop page->private
2243 * on us if the page isn't already dirty.
2244 */
2245 if (i == 0) {
2246 lock_page(page);
2247 set_page_private(page,
2248 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2249 eb->len << 2);
2250 }
Chris Mason810191f2007-10-15 16:18:55 -04002251 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
Chris Mason19c00dd2007-10-15 16:19:22 -04002252 if (i == 0)
2253 unlock_page(page);
Chris Mason810191f2007-10-15 16:18:55 -04002254 }
2255 return set_extent_dirty(tree, eb->start,
2256 eb->start + eb->len - 1, GFP_NOFS);
Chris Mason5f39d392007-10-15 16:14:19 -04002257}
2258EXPORT_SYMBOL(set_extent_buffer_dirty);
2259
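/*
 * mark the buffer's byte range uptodate in the state tree and set
 * PG_uptodate on every fully covered page.  A partially covered first or
 * last page is only marked once its entire range is uptodate
 */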
2260int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2261 struct extent_buffer *eb)
2262{
2263 unsigned long i;
2264 struct page *page;
2265 unsigned long num_pages;
2266
Chris Masondb945352007-10-15 16:15:53 -04002267 num_pages = num_extent_pages(eb->start, eb->len);
Chris Mason5f39d392007-10-15 16:14:19 -04002268
2269 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2270 GFP_NOFS);
2271 for (i = 0; i < num_pages; i++) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002272 page = extent_buffer_page(eb, i);
Chris Mason5f39d392007-10-15 16:14:19 -04002273 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2274 ((i == num_pages - 1) &&
Yan65555a02007-10-25 15:42:57 -04002275 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
Chris Mason5f39d392007-10-15 16:14:19 -04002276 check_page_uptodate(tree, page);
2277 continue;
2278 }
2279 SetPageUptodate(page);
2280 }
2281 return 0;
2282}
2283EXPORT_SYMBOL(set_extent_buffer_uptodate);
2284
2285int extent_buffer_uptodate(struct extent_map_tree *tree,
2286 struct extent_buffer *eb)
2287{
2288 if (eb->flags & EXTENT_UPTODATE)
2289 return 1;
2290 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2291 EXTENT_UPTODATE, 1);
2292}
2293EXPORT_SYMBOL(extent_buffer_uptodate);
2294
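/*
 * read in any backing pages (from byte offset 'start' onwards) that are
 * not already uptodate.  With 'wait' set this blocks until the reads
 * finish and returns -EIO if any page failed; without it the reads are
 * only started
 */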
2295int read_extent_buffer_pages(struct extent_map_tree *tree,
Chris Mason19c00dd2007-10-15 16:19:22 -04002296 struct extent_buffer *eb,
2297 u64 start,
2298 int wait)
Chris Mason5f39d392007-10-15 16:14:19 -04002299{
2300 unsigned long i;
Chris Mason19c00dd2007-10-15 16:19:22 -04002301 unsigned long start_i;
Chris Mason5f39d392007-10-15 16:14:19 -04002302 struct page *page;
2303 int err;
2304 int ret = 0;
2305 unsigned long num_pages;
2306
2307 if (eb->flags & EXTENT_UPTODATE)
2308 return 0;
2309
Chris Mason14048ed2007-10-15 16:16:28 -04002310 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
Chris Mason5f39d392007-10-15 16:14:19 -04002311 EXTENT_UPTODATE, 1)) {
2312 return 0;
2313 }
Chris Mason19c00dd2007-10-15 16:19:22 -04002314 if (start) {
2315 WARN_ON(start < eb->start);
2316 start_i = (start >> PAGE_CACHE_SHIFT) -
2317 (eb->start >> PAGE_CACHE_SHIFT);
2318 } else {
2319 start_i = 0;
2320 }
Chris Mason5f39d392007-10-15 16:14:19 -04002321
Chris Masondb945352007-10-15 16:15:53 -04002322 num_pages = num_extent_pages(eb->start, eb->len);
Chris Mason19c00dd2007-10-15 16:19:22 -04002323 for (i = start_i; i < num_pages; i++) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002324 page = extent_buffer_page(eb, i);
Chris Mason5f39d392007-10-15 16:14:19 -04002325 if (PageUptodate(page)) {
2326 continue;
2327 }
2328 if (!wait) {
2329 if (TestSetPageLocked(page)) {
2330 continue;
2331 }
2332 } else {
2333 lock_page(page);
2334 }
2335 if (!PageUptodate(page)) {
2336 err = page->mapping->a_ops->readpage(NULL, page);
2337 if (err) {
2338 ret = err;
2339 }
2340 } else {
2341 unlock_page(page);
2342 }
2343 }
2344
2345 if (ret || !wait) {
2346 return ret;
2347 }
2348
Chris Mason19c00dd2007-10-15 16:19:22 -04002349 for (i = start_i; i < num_pages; i++) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002350 page = extent_buffer_page(eb, i);
Chris Mason5f39d392007-10-15 16:14:19 -04002351 wait_on_page_locked(page);
2352 if (!PageUptodate(page)) {
2353 ret = -EIO;
2354 }
2355 }
Chris Mason4dc11902007-10-15 16:18:14 -04002356 if (!ret)
2357 eb->flags |= EXTENT_UPTODATE;
Chris Mason5f39d392007-10-15 16:14:19 -04002358 return ret;
2359}
2360EXPORT_SYMBOL(read_extent_buffer_pages);
2361
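/*
 * copy 'len' bytes at offset 'start' inside the buffer into dstv, mapping
 * the backing pages one at a time with kmap_atomic
 */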
2362void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2363 unsigned long start,
2364 unsigned long len)
2365{
2366 size_t cur;
2367 size_t offset;
2368 struct page *page;
2369 char *kaddr;
2370 char *dst = (char *)dstv;
2371 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2372 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
Chris Mason14048ed2007-10-15 16:16:28 -04002373 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
Chris Mason5f39d392007-10-15 16:14:19 -04002374
2375 WARN_ON(start > eb->len);
2376 WARN_ON(start + len > eb->start + eb->len);
2377
Chris Mason3685f792007-10-19 09:23:27 -04002378 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002379
2380 while(len > 0) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002381 page = extent_buffer_page(eb, i);
Chris Mason14048ed2007-10-15 16:16:28 -04002382 if (!PageUptodate(page)) {
2383 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2384 WARN_ON(1);
2385 }
Chris Mason5f39d392007-10-15 16:14:19 -04002386 WARN_ON(!PageUptodate(page));
2387
2388 cur = min(len, (PAGE_CACHE_SIZE - offset));
Chris Mason59d169e2007-10-19 09:23:09 -04002389 kaddr = kmap_atomic(page, KM_USER1);
Chris Mason5f39d392007-10-15 16:14:19 -04002390 memcpy(dst, kaddr + offset, cur);
Chris Mason59d169e2007-10-19 09:23:09 -04002391 kunmap_atomic(kaddr, KM_USER1);
Chris Mason5f39d392007-10-15 16:14:19 -04002392
2393 dst += cur;
2394 len -= cur;
2395 offset = 0;
2396 i++;
Chris Mason5f39d392007-10-15 16:14:19 -04002397 }
2398}
2399EXPORT_SYMBOL(read_extent_buffer);
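
/*
 * illustrative only, not lifted from the btrfs callers: a minimal sketch
 * of how the buffer calls above fit together.  'start' and 'len' are
 * whatever byte range the caller wants and must be backed by 'tree'.
 *
 *	struct extent_buffer *eb;
 *	char buf[16];
 *
 *	eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
 *	if (eb && read_extent_buffer_pages(tree, eb, 0, 1) == 0)
 *		read_extent_buffer(eb, buf, 0, sizeof(buf));
 *	free_extent_buffer(eb);
 */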
2400
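/*
 * kmap_atomic a chunk of the buffer for direct access.  The requested
 * [start, start + min_len) range must not cross a page boundary; -EINVAL
 * is returned if it does
 */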
Chris Mason19c00dd2007-10-15 16:19:22 -04002401int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
Chris Masondb945352007-10-15 16:15:53 -04002402 unsigned long min_len, char **token, char **map,
2403 unsigned long *map_start,
2404 unsigned long *map_len, int km)
Chris Mason5f39d392007-10-15 16:14:19 -04002405{
Chris Mason479965d2007-10-15 16:14:27 -04002406 size_t offset = start & (PAGE_CACHE_SIZE - 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002407 char *kaddr;
Chris Masondb945352007-10-15 16:15:53 -04002408 struct page *p;
Chris Mason5f39d392007-10-15 16:14:19 -04002409 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2410 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
Yan65555a02007-10-25 15:42:57 -04002411 unsigned long end_i = (start_offset + start + min_len - 1) >>
Chris Mason810191f2007-10-15 16:18:55 -04002412 PAGE_CACHE_SHIFT;
Chris Mason479965d2007-10-15 16:14:27 -04002413
2414 if (i != end_i)
2415 return -EINVAL;
Chris Mason5f39d392007-10-15 16:14:19 -04002416
Chris Mason5f39d392007-10-15 16:14:19 -04002417 if (i == 0) {
2418 offset = start_offset;
2419 *map_start = 0;
2420 } else {
Chris Masondb945352007-10-15 16:15:53 -04002421 offset = 0;
Chris Mason479965d2007-10-15 16:14:27 -04002422 *map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
Chris Mason5f39d392007-10-15 16:14:19 -04002423 }
Yan65555a02007-10-25 15:42:57 -04002424 if (start + min_len > eb->len) {
Chris Mason19c00dd2007-10-15 16:19:22 -04002425		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
2426 WARN_ON(1);
2427 }
Chris Mason5f39d392007-10-15 16:14:19 -04002428
Chris Masondb945352007-10-15 16:15:53 -04002429 p = extent_buffer_page(eb, i);
2430 WARN_ON(!PageUptodate(p));
2431 kaddr = kmap_atomic(p, km);
Chris Mason5f39d392007-10-15 16:14:19 -04002432 *token = kaddr;
2433 *map = kaddr + offset;
2434 *map_len = PAGE_CACHE_SIZE - offset;
2435 return 0;
2436}
Chris Mason19c00dd2007-10-15 16:19:22 -04002437EXPORT_SYMBOL(map_private_extent_buffer);
Chris Masondb945352007-10-15 16:15:53 -04002438
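/*
 * wrapper around map_private_extent_buffer that manages the buffer's
 * cached long term mapping: an existing mapping is unmapped first and
 * replaced by the new one when the map succeeds
 */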
2439int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2440 unsigned long min_len,
2441 char **token, char **map,
2442 unsigned long *map_start,
2443 unsigned long *map_len, int km)
2444{
2445 int err;
2446 int save = 0;
2447 if (eb->map_token) {
Chris Masondb945352007-10-15 16:15:53 -04002448 unmap_extent_buffer(eb, eb->map_token, km);
2449 eb->map_token = NULL;
2450 save = 1;
2451 }
Chris Mason19c00dd2007-10-15 16:19:22 -04002452 err = map_private_extent_buffer(eb, start, min_len, token, map,
2453 map_start, map_len, km);
Chris Masondb945352007-10-15 16:15:53 -04002454 if (!err && save) {
2455 eb->map_token = *token;
2456 eb->kaddr = *map;
2457 eb->map_start = *map_start;
2458 eb->map_len = *map_len;
2459 }
2460 return err;
2461}
Chris Mason5f39d392007-10-15 16:14:19 -04002462EXPORT_SYMBOL(map_extent_buffer);
2463
2464void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2465{
Chris Masonae5252b2007-10-15 16:14:41 -04002466 kunmap_atomic(token, km);
Chris Mason5f39d392007-10-15 16:14:19 -04002467}
2468EXPORT_SYMBOL(unmap_extent_buffer);
2469
2470int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2471 unsigned long start,
2472 unsigned long len)
2473{
2474 size_t cur;
2475 size_t offset;
2476 struct page *page;
2477 char *kaddr;
2478 char *ptr = (char *)ptrv;
2479 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2480 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2481 int ret = 0;
2482
2483 WARN_ON(start > eb->len);
2484 WARN_ON(start + len > eb->start + eb->len);
2485
Chris Mason3685f792007-10-19 09:23:27 -04002486 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002487
2488 while(len > 0) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002489 page = extent_buffer_page(eb, i);
Chris Mason5f39d392007-10-15 16:14:19 -04002490 WARN_ON(!PageUptodate(page));
2491
2492 cur = min(len, (PAGE_CACHE_SIZE - offset));
2493
Chris Masonae5252b2007-10-15 16:14:41 -04002494 kaddr = kmap_atomic(page, KM_USER0);
Chris Mason5f39d392007-10-15 16:14:19 -04002495 ret = memcmp(ptr, kaddr + offset, cur);
Chris Masonae5252b2007-10-15 16:14:41 -04002496 kunmap_atomic(kaddr, KM_USER0);
Chris Mason5f39d392007-10-15 16:14:19 -04002497 if (ret)
2498 break;
2499
2500 ptr += cur;
2501 len -= cur;
2502 offset = 0;
2503 i++;
Chris Mason5f39d392007-10-15 16:14:19 -04002504 }
2505 return ret;
2506}
2507EXPORT_SYMBOL(memcmp_extent_buffer);
2508
2509void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2510 unsigned long start, unsigned long len)
2511{
2512 size_t cur;
2513 size_t offset;
2514 struct page *page;
2515 char *kaddr;
2516 char *src = (char *)srcv;
2517 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2518 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2519
2520 WARN_ON(start > eb->len);
2521 WARN_ON(start + len > eb->start + eb->len);
2522
Chris Mason3685f792007-10-19 09:23:27 -04002523 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002524
2525 while(len > 0) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002526 page = extent_buffer_page(eb, i);
Chris Mason5f39d392007-10-15 16:14:19 -04002527 WARN_ON(!PageUptodate(page));
2528
2529 cur = min(len, PAGE_CACHE_SIZE - offset);
Chris Mason59d169e2007-10-19 09:23:09 -04002530 kaddr = kmap_atomic(page, KM_USER1);
Chris Mason5f39d392007-10-15 16:14:19 -04002531 memcpy(kaddr + offset, src, cur);
Chris Mason59d169e2007-10-19 09:23:09 -04002532 kunmap_atomic(kaddr, KM_USER1);
Chris Mason5f39d392007-10-15 16:14:19 -04002533
2534 src += cur;
2535 len -= cur;
2536 offset = 0;
2537 i++;
Chris Mason5f39d392007-10-15 16:14:19 -04002538 }
2539}
2540EXPORT_SYMBOL(write_extent_buffer);
2541
2542void memset_extent_buffer(struct extent_buffer *eb, char c,
2543 unsigned long start, unsigned long len)
2544{
2545 size_t cur;
2546 size_t offset;
2547 struct page *page;
2548 char *kaddr;
2549 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2550 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2551
2552 WARN_ON(start > eb->len);
2553 WARN_ON(start + len > eb->start + eb->len);
2554
Chris Mason3685f792007-10-19 09:23:27 -04002555 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002556
2557 while(len > 0) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002558 page = extent_buffer_page(eb, i);
Chris Mason5f39d392007-10-15 16:14:19 -04002559 WARN_ON(!PageUptodate(page));
2560
2561 cur = min(len, PAGE_CACHE_SIZE - offset);
Chris Masonae5252b2007-10-15 16:14:41 -04002562 kaddr = kmap_atomic(page, KM_USER0);
Chris Mason5f39d392007-10-15 16:14:19 -04002563 memset(kaddr + offset, c, cur);
Chris Masonae5252b2007-10-15 16:14:41 -04002564 kunmap_atomic(kaddr, KM_USER0);
Chris Mason5f39d392007-10-15 16:14:19 -04002565
2566 len -= cur;
2567 offset = 0;
2568 i++;
Chris Mason5f39d392007-10-15 16:14:19 -04002569 }
2570}
2571EXPORT_SYMBOL(memset_extent_buffer);
2572
2573void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
2574 unsigned long dst_offset, unsigned long src_offset,
2575 unsigned long len)
2576{
2577 u64 dst_len = dst->len;
2578 size_t cur;
2579 size_t offset;
2580 struct page *page;
2581 char *kaddr;
2582 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2583 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2584
2585 WARN_ON(src->len != dst_len);
2586
Chris Mason3685f792007-10-19 09:23:27 -04002587 offset = (start_offset + dst_offset) &
2588 ((unsigned long)PAGE_CACHE_SIZE - 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002589
2590 while(len > 0) {
Chris Mason6d36dcd2007-10-15 16:14:37 -04002591 page = extent_buffer_page(dst, i);
Chris Mason5f39d392007-10-15 16:14:19 -04002592 WARN_ON(!PageUptodate(page));
2593
2594 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
2595
Chris Masonff190c02007-10-19 10:39:41 -04002596 kaddr = kmap_atomic(page, KM_USER0);
Chris Mason5f39d392007-10-15 16:14:19 -04002597 read_extent_buffer(src, kaddr + offset, src_offset, cur);
Chris Masonff190c02007-10-19 10:39:41 -04002598 kunmap_atomic(kaddr, KM_USER0);
Chris Mason5f39d392007-10-15 16:14:19 -04002599
2600 src_offset += cur;
2601 len -= cur;
2602 offset = 0;
2603 i++;
2604 }
2605}
2606EXPORT_SYMBOL(copy_extent_buffer);
2607
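/*
 * copy 'len' bytes between two (possibly identical) pages.  memmove() is
 * used when source and destination share a page, otherwise the bytes are
 * copied backwards by hand
 */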
2608static void move_pages(struct page *dst_page, struct page *src_page,
2609 unsigned long dst_off, unsigned long src_off,
2610 unsigned long len)
2611{
Chris Masonae5252b2007-10-15 16:14:41 -04002612 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
Chris Mason5f39d392007-10-15 16:14:19 -04002613 if (dst_page == src_page) {
2614 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
2615 } else {
Chris Masonae5252b2007-10-15 16:14:41 -04002616 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
Chris Mason5f39d392007-10-15 16:14:19 -04002617 char *p = dst_kaddr + dst_off + len;
2618 char *s = src_kaddr + src_off + len;
2619
2620 while (len--)
2621 *--p = *--s;
2622
Chris Masonae5252b2007-10-15 16:14:41 -04002623 kunmap_atomic(src_kaddr, KM_USER1);
Chris Mason5f39d392007-10-15 16:14:19 -04002624 }
Chris Masonae5252b2007-10-15 16:14:41 -04002625 kunmap_atomic(dst_kaddr, KM_USER0);
Chris Mason5f39d392007-10-15 16:14:19 -04002626}
2627
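/*
 * forward copy of 'len' bytes between two (possibly identical) pages.
 * Callers must make sure the ranges do not overlap in a way that needs a
 * backwards copy
 */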
2628static void copy_pages(struct page *dst_page, struct page *src_page,
2629 unsigned long dst_off, unsigned long src_off,
2630 unsigned long len)
2631{
Chris Masonae5252b2007-10-15 16:14:41 -04002632 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
Chris Mason5f39d392007-10-15 16:14:19 -04002633 char *src_kaddr;
2634
2635 if (dst_page != src_page)
Chris Masonae5252b2007-10-15 16:14:41 -04002636 src_kaddr = kmap_atomic(src_page, KM_USER1);
Chris Mason5f39d392007-10-15 16:14:19 -04002637 else
2638 src_kaddr = dst_kaddr;
2639
2640 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
Chris Mason5f39d392007-10-15 16:14:19 -04002641 kunmap_atomic(dst_kaddr, KM_USER0);
2642 if (dst_page != src_page)
2643 kunmap_atomic(src_kaddr, KM_USER1);
Chris Mason5f39d392007-10-15 16:14:19 -04002644}
2645
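/*
 * copy 'len' bytes from src_offset to dst_offset inside the same extent
 * buffer, one page sized chunk at a time.  Overlapping copies where the
 * destination sits above the source must use memmove_extent_buffer()
 */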
2646void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2647 unsigned long src_offset, unsigned long len)
2648{
2649 size_t cur;
2650 size_t dst_off_in_page;
2651 size_t src_off_in_page;
2652 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2653 unsigned long dst_i;
2654 unsigned long src_i;
2655
2656 if (src_offset + len > dst->len) {
2657 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2658 src_offset, len, dst->len);
2659 BUG_ON(1);
2660 }
2661 if (dst_offset + len > dst->len) {
2662 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2663 dst_offset, len, dst->len);
2664 BUG_ON(1);
2665 }
2666
2667 while(len > 0) {
Chris Mason3685f792007-10-19 09:23:27 -04002668 dst_off_in_page = (start_offset + dst_offset) &
Chris Mason5f39d392007-10-15 16:14:19 -04002669 ((unsigned long)PAGE_CACHE_SIZE - 1);
Chris Mason3685f792007-10-19 09:23:27 -04002670 src_off_in_page = (start_offset + src_offset) &
Chris Mason5f39d392007-10-15 16:14:19 -04002671 ((unsigned long)PAGE_CACHE_SIZE - 1);
2672
2673 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
2674 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
2675
Chris Mason5f39d392007-10-15 16:14:19 -04002676 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
2677 src_off_in_page));
Jens Axboeae2f5412007-10-19 09:22:59 -04002678 cur = min_t(unsigned long, cur,
2679 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
Chris Mason5f39d392007-10-15 16:14:19 -04002680
Chris Mason6d36dcd2007-10-15 16:14:37 -04002681 copy_pages(extent_buffer_page(dst, dst_i),
2682 extent_buffer_page(dst, src_i),
Chris Mason5f39d392007-10-15 16:14:19 -04002683 dst_off_in_page, src_off_in_page, cur);
2684
2685 src_offset += cur;
2686 dst_offset += cur;
2687 len -= cur;
2688 }
2689}
2690EXPORT_SYMBOL(memcpy_extent_buffer);
2691
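/*
 * overlap safe version of memcpy_extent_buffer(): when the destination is
 * above the source the copy is done back to front via move_pages()
 */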
2692void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
2693 unsigned long src_offset, unsigned long len)
2694{
2695 size_t cur;
2696 size_t dst_off_in_page;
2697 size_t src_off_in_page;
2698 unsigned long dst_end = dst_offset + len - 1;
2699 unsigned long src_end = src_offset + len - 1;
2700 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
2701 unsigned long dst_i;
2702 unsigned long src_i;
2703
2704 if (src_offset + len > dst->len) {
2705 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
2706 src_offset, len, dst->len);
2707 BUG_ON(1);
2708 }
2709 if (dst_offset + len > dst->len) {
2710 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
2711 dst_offset, len, dst->len);
2712 BUG_ON(1);
2713 }
2714 if (dst_offset < src_offset) {
2715 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
2716 return;
2717 }
2718 while(len > 0) {
2719 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
2720 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
2721
Chris Mason3685f792007-10-19 09:23:27 -04002722 dst_off_in_page = (start_offset + dst_end) &
Chris Mason5f39d392007-10-15 16:14:19 -04002723 ((unsigned long)PAGE_CACHE_SIZE - 1);
Chris Mason3685f792007-10-19 09:23:27 -04002724 src_off_in_page = (start_offset + src_end) &
Chris Mason5f39d392007-10-15 16:14:19 -04002725 ((unsigned long)PAGE_CACHE_SIZE - 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002726
Jens Axboeae2f5412007-10-19 09:22:59 -04002727 cur = min_t(unsigned long, len, src_off_in_page + 1);
Chris Mason5f39d392007-10-15 16:14:19 -04002728 cur = min(cur, dst_off_in_page + 1);
Chris Mason6d36dcd2007-10-15 16:14:37 -04002729 move_pages(extent_buffer_page(dst, dst_i),
2730 extent_buffer_page(dst, src_i),
Chris Mason5f39d392007-10-15 16:14:19 -04002731 dst_off_in_page - cur + 1,
2732 src_off_in_page - cur + 1, cur);
2733
Chris Masondb945352007-10-15 16:15:53 -04002734 dst_end -= cur;
2735 src_end -= cur;
Chris Mason5f39d392007-10-15 16:14:19 -04002736 len -= cur;
2737 }
2738}
2739EXPORT_SYMBOL(memmove_extent_buffer);