/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

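/*
 * return the end byte (exclusive) of an ordered extent in the file,
 * clamping to (u64)-1 if file_offset + len would overflow
 */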
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

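/*
 * helper to check if a given byte range [file_offset, file_offset + len)
 * overlaps the entry at all
 */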
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that covers this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

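/*
 * the three helpers below are thin wrappers around __btrfs_add_ordered_extent
 * for regular, direct IO and compressed writes respectively
 */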
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size,
					 int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	int index = log->log_transid % 2;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		spin_lock(&log->log_extents_lock[index]);
		if (list_empty(&ordered->log_list)) {
			list_add_tail(&ordered->log_list, &log->logged_list[index]);
			atomic_inc(&ordered->refs);
		}
		spin_unlock(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&tree->lock);
}

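/*
 * wait for the ordered extents pinned on this log transaction's logged list
 * to finish their IO, dropping the references taken by
 * btrfs_get_logged_extents()
 */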
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

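/*
 * like btrfs_wait_logged_extents(), but drops the references on the logged
 * list without waiting for the IO to finish
 */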
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

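/*
 * worker callback queued by btrfs_wait_ordered_extents(); it waits for a
 * single ordered extent to complete and then signals the completion the
 * waiter is blocked on
 */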
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		ordered->flush_work.func = btrfs_run_ordered_extent_work;
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

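/*
 * wait for the ordered extents of every root in the filesystem that
 * currently has ordered extents pending
 */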
void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		btrfs_wait_ordered_extents(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

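/*
 * a minimal usage sketch (illustrative only): a caller that must wait for
 * any ordered extent covering 'offset' can do
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, offset);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 *
 * which mirrors the lookup/start/put loop in btrfs_wait_ordered_range()
 */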
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							 u64 file_offset,
							 u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * pending i_size, or we will not know the real
			 * i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the disk_i_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}

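/*
 * module init/exit helpers that create and destroy the slab cache backing
 * struct btrfs_ordered_extent allocations
 */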
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}