/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

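/*
 * Illustrative flow (a sketch with placeholder argument names, not a
 * call made from this file): instead of updating the extent allocation
 * tree inline, a caller that allocates a tree block queues the backref
 * work with
 *
 *	btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *				   parent, root_objectid, level,
 *				   BTRFS_ADD_DELAYED_EXTENT, NULL);
 *
 * The ref is merged into the per-transaction rbtree of ref heads and
 * applied in batches when the delayed refs are run.
 */
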
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

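/*
 * A sketch of the resulting order, on assumed values: for
 * BTRFS_EXTENT_DATA_REF_KEY refs the sort key is (root, objectid,
 * offset), so a ref for (root 5, inode 257, offset 0) sorts before one
 * for (root 5, inode 257, offset 4096); shared refs compare by parent
 * block only.
 */
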
/*
 * insert a new ref head into the head ref rbtree.  Returns NULL on
 * success, or the existing head if one is already present for the
 * same bytenr.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no
 * exact match is found.  When the search runs off the end of the tree
 * it wraps around to the first entry, which lets btrfs_select_ref_head()
 * walk the heads round-robin.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

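/*
 * Try to take the head's mutex without holding delayed_refs->lock
 * across a sleep.  If the trylock fails, pin the head with an extra
 * reference, drop the spinlock and block on the mutex; by the time the
 * mutex is acquired the head may already have been processed and
 * removed from the tree, in which case -EAGAIN tells the caller to
 * redo its search.
 */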
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

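/*
 * Merging arithmetic, worked through on assumed values: given an ADD
 * ref with ref_mod 2 and a DROP ref with ref_mod 1 for the same (type,
 * root/parent) key, the actions differ, so mod becomes -1 and the pair
 * collapses to a single ADD with ref_mod 1.  If the mods cancel to
 * zero, both nodes are dropped.
 */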
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

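/*
 * A delayed ref that carries a nonzero seq is held back while any tree
 * mod log user still holds an older sequence number, so that concurrent
 * backref walkers see a consistent view.  Returns 1 if refs at the
 * given seq must be held back, 0 otherwise.
 */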
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node at the tail of the list, or merge it
 * with the current tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * We only need the lock here because the head could be under
	 * processing right now; for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}

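/*
 * The pending_csums adjustment above, on assumed numbers: a data head
 * whose total_ref_mod drops from 0 to -1 now describes an extent that
 * is expected to be freed, so its checksum items will need deleting
 * and num_bytes is added to pending_csums; a head climbing back to
 * >= 0 subtracts it again.
 */
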
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *qexisting;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
							     qrecord);
		if (qexisting)
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

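/*
 * count_mod worked through on assumed numbers: queueing
 * BTRFS_DROP_DELAYED_REF for an extent with no existing head creates a
 * head with ref_mod -1; a later BTRFS_ADD_DELAYED_REF for the same
 * bytenr finds the existing head and update_existing_head_ref() brings
 * it back to 0.
 */
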
/*
 * helper to insert a delayed tree ref into the head's ref list.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated,
	 * but bad practice is everywhere... follow it for now.  Needs
	 * cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the head's ref list.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
				     struct btrfs_trans_handle *trans,
				     u64 ref_root, u64 bytenr, u64 num_bytes)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *ref_head;
	int ret = 0;

	if (!fs_info->quota_enabled || !is_fstree(ref_root))
		return 0;

	delayed_refs = &trans->transaction->delayed_refs;

	spin_lock(&delayed_refs->lock);
	ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
	if (!ref_head) {
		ret = -ENOENT;
		goto out;
	}
	WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
	ref_head->qgroup_ref_root = ref_root;
	ref_head->qgroup_reserved = num_bytes;
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

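/*
 * The helper above attaches a qgroup reservation to a head that must
 * already exist: the head is looked up by bytenr and -ENOENT is
 * returned otherwise, so a delayed ref for the extent is expected to
 * have been queued earlier in the same transaction.
 */
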
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

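/*
 * The BTRFS_UPDATE_DELAYED_HEAD action used above gives the head a
 * count_mod of zero in add_delayed_ref_head(): it only records the
 * extent_op (a key or flags update) and does not change the extent's
 * reference count.
 */
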
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}