/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

/* end of the byte range covered by this entry, clamped at -1 on overflow */
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/*
 * returns NULL if the insertion worked, or the node already in the
 * tree that overlaps the range being inserted
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree.  If it can't be found, return
 * NULL and set *prev_ret to the entry just before it (the last entry
 * that starts below the offset)
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
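
/*
 * The check above treats the range as half-open: an entry at
 * file_offset 0 with len 4096 contains offsets 0 through 4095 but not
 * 4096.  A minimal sketch of a hypothetical caller checking whether a
 * page falls inside an entry:
 *
 *	if (offset_in_entry(entry, page_offset(page)))
 *		wait_or_flush_the_page();
 *
 * where wait_or_flush_the_page() is a stand-in, not a real helper.
 */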

/*
 * find the first ordered struct that covers this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk byte offset (bytenr) of an extent already reserved
 * in the extent allocation tree
 *
 * len is the length of the extent in the file, and disk_len is the
 * length reserved on disk (the two differ for compressed extents)
 *
 * This also sets the EXTENT_ORDERED bit on the range in the inode.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	mutex_lock(&tree->mutex);
	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->inode = inode;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	/* an overlapping insertion means a bug in the caller */
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	BUG_ON(node);

	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
			   entry_end(entry) - 1, GFP_NOFS);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	mutex_unlock(&tree->mutex);
	return 0;
}
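
/*
 * Illustrative sketch, not part of the original file: a COW write path
 * that has just reserved a disk extent registers it here before any
 * bios go out.  The variable names (start, ins, ram_size) are
 * hypothetical; the real callers live in inode.c.  A type of 0 means a
 * plain COW extent, while nocow and prealloc writes pass
 * BTRFS_ORDERED_NOCOW or BTRFS_ORDERED_PREALLOC:
 *
 *	ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *				       ram_size, ins.offset, 0);
 */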

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, the sums are split across multiple ordered extents.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	list_add_tail(&sum->list, &entry->list);
	mutex_unlock(&tree->mutex);
	return 0;
}


/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
			     GFP_NOFS);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	ret = test_range_bit(io_tree, entry->file_offset,
			     entry->file_offset + entry->len - 1,
			     EXTENT_ORDERED, 0);
	if (ret == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
out:
	mutex_unlock(&tree->mutex);
	return ret == 0;
}
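
/*
 * Sketch of the intended call pattern (hypothetical; the real consumer
 * is the writepage end_io hook in inode.c): every completed bio
 * accounts its byte range, and only the call that observes the whole
 * extent finished gets a nonzero return and performs the metadata
 * insertion, roughly:
 *
 *	if (btrfs_dec_test_ordered_pending(inode, start, end - start + 1))
 *		finish_the_ordered_io(inode, start, end);
 *
 * where finish_the_ordered_io() stands in for the real completion
 * routine.
 */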

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	if (atomic_dec_and_test(&entry->refs)) {
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}


/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but anyone waiting on this extent is woken up.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered operations
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	mutex_unlock(&tree->mutex);
	wake_up(&entry->wait);
	return 0;
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}


/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the IO.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
			iput(inode);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return 0;
}


/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
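
/*
 * Typical use (a sketch mirroring callers elsewhere in btrfs): look up
 * the ordered extent covering an offset, then block until it
 * completes.  The reference taken by the lookup must always be
 * dropped:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */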

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	u64 wait_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
	wait_end = orig_end;
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);

	/* The compression code will leave pages locked but return from
	 * writepage without setting the page writeback.  Starting again
	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);

	btrfs_wait_on_page_writeback_range(inode->i_mapping,
					   start >> PAGE_CACHE_SHIFT,
					   orig_end >> PAGE_CACHE_SHIFT);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
			   EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
		/* actually sleep; a bare schedule_timeout() would return
		 * immediately and busy loop here
		 */
		schedule_timeout_uninterruptible(1);
		goto again;
	}
	return 0;
}
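
/*
 * Example (sketch): an fsync-style caller that needs everything the
 * file has outstanding flushed and waited on passes the whole range,
 * just as btrfs_run_ordered_operations() does above in wait mode:
 *
 *	btrfs_wait_ordered_range(inode, 0, (u64)-1);
 */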

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}
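
/*
 * A hypothetical "check and bail" user of the lookup (sketch): a page
 * release path can refuse to free a page while an ordered extent still
 * covers it, dropping the reference either way:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
 *	if (ordered) {
 *		btrfs_put_ordered_extent(ordered);
 *		return 0;
 *	}
 *
 * returning 0 here would tell the VM the page is still busy.
 */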

/*
 * lookup and return the ordered extent covering 'file_offset', or the
 * closest one the search finds (usually the last extent ending before
 * the offset).  NULL is returned if the tree is empty
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	struct rb_node *node;
	struct btrfs_ordered_extent *test;

	mutex_lock(&tree->mutex);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size >= inode->i_size ||
	    ordered->file_offset + ordered->len <= disk_i_size) {
		goto out;
	}

	/*
	 * we can't update the disk_i_size if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size,
			   ordered->file_offset + ordered->len - 1,
			   EXTENT_DELALLOC, 0)) {
		goto out;
	}
	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	node = &ordered->rb_node;
	while (1) {
		node = rb_prev(node);
		if (!node)
			break;
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= inode->i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
	}
	new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the end of this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	node = rb_next(&ordered->rb_node);
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one?
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > entry_end(ordered))
			i_size_test = test->file_offset;
	} else {
		i_size_test = i_size_read(inode);
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > entry_end(ordered) &&
	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
			    EXTENT_DELALLOC, 0)) {
		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
out:
	mutex_unlock(&tree->mutex);
	return 0;
}
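
/*
 * Worked example of the logic above: with 4K ordered extents at
 * [0,4K), [4K,8K) and [8K,12K), suppose the first and third finish but
 * the second is still in flight.  When [8K,12K) completes, the
 * backward walk hits the pending [4K,8K) extent before reaching
 * disk_i_size (4K) and bails out, so disk_i_size stays at 4K.  Once
 * [4K,8K) finishes, the backward walk finds nothing pending and the
 * forward walk sees no ordered extent or delalloc beyond it, so
 * disk_i_size can jump all the way out to 12K.
 */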

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	mutex_lock(&tree->mutex);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	mutex_unlock(&tree->mutex);
	btrfs_put_ordered_extent(ordered);
	return ret;
}
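
/*
 * Sketch of a hypothetical caller: the read checksum path falls back
 * to this when a csum item has not made it into the btree yet, e.g.
 * while the ordered extent is still pending:
 *
 *	if (btrfs_find_ordered_sum(inode, offset, disk_bytenr, &csum))
 *		handle_missing_csum();
 *
 * handle_missing_csum() is a stand-in; a return of 1 just means the
 * checksum was not found here either.
 */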


/**
 * btrfs_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Taken from mm/filemap.c (__filemap_fdatawrite_range) because it
 * isn't exported.
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
			   loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = start,
		.range_end = end,
		.for_writepages = 1,
	};
	return btrfs_writepages(mapping, &wbc);
}


/**
 * btrfs_wait_on_page_writeback_range - wait for writeback to complete
 * @mapping:	target address_space
 * @start:	beginning page index
 * @end:	ending page index
 *
 * Taken from mm/filemap.c (wait_on_page_writeback_range) because it
 * isn't exported.
 *
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return 0;

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return 0;
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}
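
/*
 * Example (sketch): rename is the canonical user.  Before the
 * transaction that moves the name can commit, the old inode's data is
 * queued for flushing with:
 *
 *	btrfs_add_ordered_operation(trans, root, old_inode);
 *
 * so a crash after the rename commits never exposes a file with stale
 * or missing data under its new name.
 */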