Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2007 Oracle. All rights reserved. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or |
| 5 | * modify it under the terms of the GNU General Public |
| 6 | * License v2 as published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope that it will be useful, |
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 11 | * General Public License for more details. |
| 12 | * |
| 13 | * You should have received a copy of the GNU General Public |
| 14 | * License along with this program; if not, write to the |
| 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
| 16 | * Boston, MA 021110-1307, USA. |
| 17 | */ |
| 18 | |
| 19 | #include <linux/gfp.h> |
| 20 | #include <linux/slab.h> |
Chris Mason | d6bfde8 | 2008-04-30 13:59:35 -0400 | [diff] [blame] | 21 | #include <linux/blkdev.h> |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 22 | #include "ctree.h" |
| 23 | #include "transaction.h" |
| 24 | #include "btrfs_inode.h" |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 25 | #include "extent_io.h" |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 26 | |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 27 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 28 | static u64 entry_end(struct btrfs_ordered_extent *entry) |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 29 | { |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 30 | if (entry->file_offset + entry->len < entry->file_offset) |
| 31 | return (u64)-1; |
| 32 | return entry->file_offset + entry->len; |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 33 | } |
| 34 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 35 | static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, |
| 36 | struct rb_node *node) |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 37 | { |
| 38 | struct rb_node ** p = &root->rb_node; |
| 39 | struct rb_node * parent = NULL; |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 40 | struct btrfs_ordered_extent *entry; |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 41 | |
| 42 | while(*p) { |
| 43 | parent = *p; |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 44 | entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 45 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 46 | if (file_offset < entry->file_offset) |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 47 | p = &(*p)->rb_left; |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 48 | else if (file_offset >= entry_end(entry)) |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 49 | p = &(*p)->rb_right; |
| 50 | else |
| 51 | return parent; |
| 52 | } |
| 53 | |
| 54 | rb_link_node(node, parent, p); |
| 55 | rb_insert_color(node, root); |
| 56 | return NULL; |
| 57 | } |
| 58 | |
/*
 * Find the ordered extent in @root whose range contains @file_offset.
 *
 * Returns the matching rb_node, or NULL on a miss.  On a miss, when
 * @prev_ret is non-NULL it is set to the first node whose range ends
 * after @file_offset (the right place to resume a forward scan), or
 * NULL if the tree is empty.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	/* standard rb-tree descent; remember the last node visited */
	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	/*
	 * the descent may have left prev on a node entirely below
	 * file_offset; walk forward until the next node ends past it
	 */
	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	/*
	 * now walk backward to the lowest node that still ends after
	 * file_offset, so prev is the first candidate in tree order
	 */
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
| 108 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 109 | static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 110 | { |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 111 | if (file_offset < entry->file_offset || |
| 112 | entry->file_offset + entry->len <= file_offset) |
| 113 | return 0; |
| 114 | return 1; |
| 115 | } |
| 116 | |
| 117 | static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, |
| 118 | u64 file_offset) |
| 119 | { |
| 120 | struct rb_root *root = &tree->tree; |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 121 | struct rb_node *prev; |
| 122 | struct rb_node *ret; |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 123 | struct btrfs_ordered_extent *entry; |
| 124 | |
| 125 | if (tree->last) { |
| 126 | entry = rb_entry(tree->last, struct btrfs_ordered_extent, |
| 127 | rb_node); |
| 128 | if (offset_in_entry(entry, file_offset)) |
| 129 | return tree->last; |
| 130 | } |
| 131 | ret = __tree_search(root, file_offset, &prev); |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 132 | if (!ret) |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 133 | ret = prev; |
| 134 | if (ret) |
| 135 | tree->last = ret; |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 136 | return ret; |
| 137 | } |
| 138 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 139 | int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, |
| 140 | u64 start, u64 len) |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 141 | { |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 142 | struct btrfs_ordered_inode_tree *tree; |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 143 | struct rb_node *node; |
| 144 | struct btrfs_ordered_extent *entry; |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 145 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 146 | tree = &BTRFS_I(inode)->ordered_tree; |
| 147 | entry = kzalloc(sizeof(*entry), GFP_NOFS); |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 148 | if (!entry) |
| 149 | return -ENOMEM; |
| 150 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 151 | mutex_lock(&tree->mutex); |
| 152 | entry->file_offset = file_offset; |
| 153 | entry->start = start; |
| 154 | entry->len = len; |
Chris Mason | 4d5e74b | 2008-01-16 16:09:22 -0500 | [diff] [blame] | 155 | entry->inode = inode; |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 156 | /* one ref for the tree */ |
| 157 | atomic_set(&entry->refs, 1); |
| 158 | init_waitqueue_head(&entry->wait); |
| 159 | INIT_LIST_HEAD(&entry->list); |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 160 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 161 | node = tree_insert(&tree->tree, file_offset, |
| 162 | &entry->rb_node); |
| 163 | if (node) { |
| 164 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
| 165 | atomic_inc(&entry->refs); |
| 166 | } |
| 167 | set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset, |
| 168 | entry_end(entry) - 1, GFP_NOFS); |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 169 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 170 | set_bit(BTRFS_ORDERED_START, &entry->flags); |
| 171 | mutex_unlock(&tree->mutex); |
| 172 | BUG_ON(node); |
| 173 | return 0; |
| 174 | } |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 175 | |
/*
 * Attach a checksum record to the ordered extent that covers
 * sum->file_offset.  Sums accumulate on entry->list while the data IO
 * is in flight.
 *
 * An ordered extent for the offset must already exist; a miss is a
 * logic error, handled by dumping the whole tree and BUG()ing.
 */
int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, sum->file_offset);
	if (!node) {
search_fail:
		/* debugging aid: dump every ordered extent before dying */
		printk("add ordered sum failed to find a node for inode %lu offset %Lu\n", inode->i_ino, sum->file_offset);
		node = rb_first(&tree->tree);
		while(node) {
			entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
			printk("entry %Lu %Lu %Lu\n", entry->file_offset, entry->file_offset + entry->len, entry->start);
			node = rb_next(node);
		}
		BUG();
	}
	BUG_ON(!node);

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	/*
	 * tree_search() can return a neighboring node on a miss; re-check
	 * the range.  The goto jumps back INTO the if-block above — legal
	 * C, since labels have function scope — to reuse the dump path.
	 */
	if (!offset_in_entry(entry, sum->file_offset)) {
		goto search_fail;
	}

	list_add_tail(&sum->list, &entry->list);
	mutex_unlock(&tree->mutex);
	return 0;
}
| 207 | |
/*
 * Called as IO on [file_offset, file_offset + io_size) completes.
 * Clears the EXTENT_ORDERED bits for that range and tests whether the
 * ordered extent covering file_offset has now fully completed.
 *
 * Returns 1 only on the call that observes the extent's last ordered
 * bit cleared (the test_and_set_bit() of BTRFS_ORDERED_IO_DONE makes
 * sure at most one caller wins); returns 0 otherwise — including when
 * no ordered extent covers the offset.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
			     GFP_NOFS);
	node = tree_search(tree, file_offset);
	if (!node) {
		/* note the inverted return below: ret = 1 means "return 0" */
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	/*
	 * any EXTENT_ORDERED bit still set inside the extent's range
	 * means more IO is outstanding
	 */
	ret = test_range_bit(io_tree, entry->file_offset,
			     entry->file_offset + entry->len - 1,
			     EXTENT_ORDERED, 0);
	if (!test_bit(BTRFS_ORDERED_START, &entry->flags)) {
		printk("inode %lu not ready yet for extent %Lu %Lu\n", inode->i_ino, entry->file_offset, entry_end(entry));
	}
	if (ret == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
out:
	mutex_unlock(&tree->mutex);
	return ret == 0;
}
| 245 | |
| 246 | int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) |
| 247 | { |
| 248 | if (atomic_dec_and_test(&entry->refs)) |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 249 | kfree(entry); |
| 250 | return 0; |
| 251 | } |
| 252 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 253 | int btrfs_remove_ordered_extent(struct inode *inode, |
| 254 | struct btrfs_ordered_extent *entry) |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 255 | { |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 256 | struct btrfs_ordered_inode_tree *tree; |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 257 | struct rb_node *node; |
| 258 | |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 259 | tree = &BTRFS_I(inode)->ordered_tree; |
| 260 | mutex_lock(&tree->mutex); |
| 261 | node = &entry->rb_node; |
Chris Mason | dc17ff8 | 2008-01-08 15:46:30 -0500 | [diff] [blame] | 262 | rb_erase(node, &tree->tree); |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 263 | tree->last = NULL; |
| 264 | set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); |
| 265 | mutex_unlock(&tree->mutex); |
| 266 | wake_up(&entry->wait); |
Chris Mason | 81d7ed2 | 2008-04-25 08:51:48 -0400 | [diff] [blame] | 267 | return 0; |
| 268 | } |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 269 | |
| 270 | void btrfs_wait_ordered_extent(struct inode *inode, |
| 271 | struct btrfs_ordered_extent *entry) |
| 272 | { |
| 273 | u64 start = entry->file_offset; |
| 274 | u64 end = start + entry->len - 1; |
| 275 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) |
| 276 | do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); |
| 277 | #else |
| 278 | do_sync_mapping_range(inode->i_mapping, start, end, |
| 279 | SYNC_FILE_RANGE_WRITE); |
| 280 | #endif |
| 281 | wait_event(entry->wait, |
| 282 | test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); |
| 283 | } |
| 284 | |
| 285 | static void btrfs_start_ordered_extent(struct inode *inode, |
| 286 | struct btrfs_ordered_extent *entry, int wait) |
| 287 | { |
| 288 | u64 start = entry->file_offset; |
| 289 | u64 end = start + entry->len - 1; |
| 290 | |
| 291 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) |
| 292 | do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); |
| 293 | #else |
| 294 | do_sync_mapping_range(inode->i_mapping, start, end, |
| 295 | SYNC_FILE_RANGE_WRITE); |
| 296 | #endif |
| 297 | if (wait) |
| 298 | wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, |
| 299 | &entry->flags)); |
| 300 | } |
| 301 | |
| 302 | void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) |
| 303 | { |
| 304 | u64 end; |
| 305 | struct btrfs_ordered_extent *ordered; |
| 306 | int found; |
| 307 | int should_wait = 0; |
| 308 | |
| 309 | again: |
| 310 | if (start + len < start) |
| 311 | end = (u64)-1; |
| 312 | else |
| 313 | end = start + len - 1; |
| 314 | found = 0; |
| 315 | while(1) { |
| 316 | ordered = btrfs_lookup_first_ordered_extent(inode, end); |
| 317 | if (!ordered) { |
| 318 | break; |
| 319 | } |
| 320 | if (ordered->file_offset >= start + len) { |
| 321 | btrfs_put_ordered_extent(ordered); |
| 322 | break; |
| 323 | } |
| 324 | if (ordered->file_offset + ordered->len < start) { |
| 325 | btrfs_put_ordered_extent(ordered); |
| 326 | break; |
| 327 | } |
| 328 | btrfs_start_ordered_extent(inode, ordered, should_wait); |
| 329 | found++; |
| 330 | end = ordered->file_offset; |
| 331 | btrfs_put_ordered_extent(ordered); |
| 332 | if (end == 0) |
| 333 | break; |
| 334 | end--; |
| 335 | } |
| 336 | if (should_wait && found) { |
| 337 | should_wait = 0; |
| 338 | goto again; |
| 339 | } |
| 340 | } |
| 341 | |
/*
 * Stub: would mark [start, start + len) as pending ordered IO against
 * an existing ordered extent.  The real implementation is compiled out
 * below (#if 0); any caller currently just triggers a WARN_ON and gets
 * 0 back.
 */
int btrfs_add_ordered_pending(struct inode *inode,
			      struct btrfs_ordered_extent *ordered,
			      u64 start, u64 len)
{
	WARN_ON(1);
	return 0;
#if 0
	int ret;
	struct btrfs_ordered_inode_tree *tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	/* too late: the extent's IO already finished */
	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ret = -EAGAIN;
		goto out;
	}
	set_extent_ordered(io_tree, start, start + len - 1, GFP_NOFS);
	ret = 0;
out:
	mutex_unlock(&tree->mutex);
	return ret;
#endif
}
| 366 | |
| 367 | struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, |
| 368 | u64 file_offset) |
| 369 | { |
| 370 | struct btrfs_ordered_inode_tree *tree; |
| 371 | struct rb_node *node; |
| 372 | struct btrfs_ordered_extent *entry = NULL; |
| 373 | |
| 374 | tree = &BTRFS_I(inode)->ordered_tree; |
| 375 | mutex_lock(&tree->mutex); |
| 376 | node = tree_search(tree, file_offset); |
| 377 | if (!node) |
| 378 | goto out; |
| 379 | |
| 380 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
| 381 | if (!offset_in_entry(entry, file_offset)) |
| 382 | entry = NULL; |
| 383 | if (entry) |
| 384 | atomic_inc(&entry->refs); |
| 385 | out: |
| 386 | mutex_unlock(&tree->mutex); |
| 387 | return entry; |
| 388 | } |
| 389 | |
| 390 | struct btrfs_ordered_extent * |
| 391 | btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset) |
| 392 | { |
| 393 | struct btrfs_ordered_inode_tree *tree; |
| 394 | struct rb_node *node; |
| 395 | struct btrfs_ordered_extent *entry = NULL; |
| 396 | |
| 397 | tree = &BTRFS_I(inode)->ordered_tree; |
| 398 | mutex_lock(&tree->mutex); |
| 399 | node = tree_search(tree, file_offset); |
| 400 | if (!node) |
| 401 | goto out; |
| 402 | |
| 403 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
| 404 | atomic_inc(&entry->refs); |
| 405 | out: |
| 406 | mutex_unlock(&tree->mutex); |
| 407 | return entry; |
| 408 | } |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame^] | 409 | |
| 410 | int btrfs_ordered_update_i_size(struct inode *inode, |
| 411 | struct btrfs_ordered_extent *ordered) |
| 412 | { |
| 413 | struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; |
| 414 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
| 415 | u64 disk_i_size; |
| 416 | u64 new_i_size; |
| 417 | u64 i_size_test; |
| 418 | struct rb_node *node; |
| 419 | struct btrfs_ordered_extent *test; |
| 420 | |
| 421 | mutex_lock(&tree->mutex); |
| 422 | disk_i_size = BTRFS_I(inode)->disk_i_size; |
| 423 | |
| 424 | /* |
| 425 | * if the disk i_size is already at the inode->i_size, or |
| 426 | * this ordered extent is inside the disk i_size, we're done |
| 427 | */ |
| 428 | if (disk_i_size >= inode->i_size || |
| 429 | ordered->file_offset + ordered->len <= disk_i_size) { |
| 430 | goto out; |
| 431 | } |
| 432 | |
| 433 | /* |
| 434 | * we can't update the disk_isize if there are delalloc bytes |
| 435 | * between disk_i_size and this ordered extent |
| 436 | */ |
| 437 | if (test_range_bit(io_tree, disk_i_size, |
| 438 | ordered->file_offset + ordered->len - 1, |
| 439 | EXTENT_DELALLOC, 0)) { |
| 440 | goto out; |
| 441 | } |
| 442 | /* |
| 443 | * walk backward from this ordered extent to disk_i_size. |
| 444 | * if we find an ordered extent then we can't update disk i_size |
| 445 | * yet |
| 446 | */ |
| 447 | while(1) { |
| 448 | node = rb_prev(&ordered->rb_node); |
| 449 | if (!node) |
| 450 | break; |
| 451 | test = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
| 452 | if (test->file_offset + test->len <= disk_i_size) |
| 453 | break; |
| 454 | if (test->file_offset >= inode->i_size) |
| 455 | break; |
| 456 | if (test->file_offset >= disk_i_size) |
| 457 | goto out; |
| 458 | } |
| 459 | new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode)); |
| 460 | |
| 461 | /* |
| 462 | * at this point, we know we can safely update i_size to at least |
| 463 | * the offset from this ordered extent. But, we need to |
| 464 | * walk forward and see if ios from higher up in the file have |
| 465 | * finished. |
| 466 | */ |
| 467 | node = rb_next(&ordered->rb_node); |
| 468 | i_size_test = 0; |
| 469 | if (node) { |
| 470 | /* |
| 471 | * do we have an area where IO might have finished |
| 472 | * between our ordered extent and the next one. |
| 473 | */ |
| 474 | test = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
| 475 | if (test->file_offset > entry_end(ordered)) { |
| 476 | i_size_test = test->file_offset - 1; |
| 477 | } |
| 478 | } else { |
| 479 | i_size_test = i_size_read(inode); |
| 480 | } |
| 481 | |
| 482 | /* |
| 483 | * i_size_test is the end of a region after this ordered |
| 484 | * extent where there are no ordered extents. As long as there |
| 485 | * are no delalloc bytes in this area, it is safe to update |
| 486 | * disk_i_size to the end of the region. |
| 487 | */ |
| 488 | if (i_size_test > entry_end(ordered) && |
| 489 | !test_range_bit(io_tree, entry_end(ordered), i_size_test, |
| 490 | EXTENT_DELALLOC, 0)) { |
| 491 | new_i_size = min_t(u64, i_size_test, i_size_read(inode)); |
| 492 | } |
| 493 | BTRFS_I(inode)->disk_i_size = new_i_size; |
| 494 | out: |
| 495 | mutex_unlock(&tree->mutex); |
| 496 | return 0; |
| 497 | } |