/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF		1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF		2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT	3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD	4 /* not changing ref count on head ref */

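/*
 * A minimal sketch of how these action values are meant to be consumed;
 * the real dispatch lives in the extent tree code, so this is illustrative
 * only and the helper names are hypothetical:
 *
 *	switch (node->action) {
 *	case BTRFS_ADD_DELAYED_REF:
 *	case BTRFS_ADD_DELAYED_EXTENT:
 *		ret = insert_or_update_ref(trans, node);
 *		break;
 *	case BTRFS_DROP_DELAYED_REF:
 *		ret = drop_ref(trans, node);
 *		break;
 *	case BTRFS_UPDATE_DELAYED_HEAD:
 *		ret = run_extent_op_only(trans, node);
 *		break;
 *	}
 */
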
struct btrfs_delayed_ref_node {
	struct rb_node rb_node;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this node a head ref? */
	unsigned int is_head:1;
	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};
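
/*
 * Worked example of the ref_mod rules above, with made-up numbers: if a
 * transaction queues two adds and one drop for the same extent, each
 * individual ref carries ref_mod = 1 (the sign comes from ->action),
 * while the head ref accumulates the signed total:
 *
 *	BTRFS_ADD_DELAYED_REF	-> +1
 *	BTRFS_ADD_DELAYED_REF	-> +1
 *	BTRFS_DROP_DELAYED_REF	-> -1
 *	head ref_mod		=  1
 */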

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u64 flags_to_set;
	int level;
	unsigned int update_key:1;
	unsigned int update_flags:1;
	unsigned int is_data:1;
};
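
/*
 * A usage sketch for the struct above, using the cache helpers declared
 * later in this header.  BTRFS_BLOCK_FLAG_FULL_BACKREF is just one
 * plausible flags value; error handling is trimmed for brevity:
 *
 *	struct btrfs_delayed_extent_op *op;
 *
 *	op = btrfs_alloc_delayed_extent_op();
 *	if (!op)
 *		return -ENOMEM;
 *	op->flags_to_set = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 *	op->update_flags = 1;
 *	op->update_key = 0;
 *	op->is_data = 0;
 *	op->level = level;
 */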

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	spinlock_t lock;
	struct rb_root ref_root;

	struct rb_node href_node;

	struct btrfs_delayed_extent_op *extent_op;
	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-ram accounting to properly reflect
	 * the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int processing:1;
};
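
/*
 * Illustrative locking sketch, per the comments above: a reader that wants
 * a stable view of the pending ref count mods takes the head's mutex first.
 * total_mods is a hypothetical local; the real lookup/lock dance also
 * involves the delayed_refs spinlock and is done in the .c files:
 *
 *	mutex_lock(&head->mutex);
 *	total_mods = head->node.ref_mod;
 *	mutex_unlock(&head->mutex);
 */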

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};
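
/*
 * Note (my reading of the ref key types, not stated in this header): in
 * both structs above, root and parent mirror the two backref styles.
 * Roughly, a non-zero parent goes with the shared ref types (keyed on the
 * parent block's bytenr), while root goes with the per-tree ref types.
 */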

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root href_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/*
	 * how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;
};
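
/*
 * Sketch of the locking rule stated above (illustrative, not lifted from
 * the .c files): any walk of href_root holds ->lock, e.g.
 *
 *	spin_lock(&delayed_refs->lock);
 *	node = rb_first(&delayed_refs->href_root);
 *	while (node) {
 *		head = rb_entry(node, struct btrfs_delayed_ref_head,
 *				href_node);
 *		node = rb_next(node);
 *	}
 *	spin_unlock(&delayed_refs->lock);
 */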

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}
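
/*
 * Note: GFP_NOFS is used above because these allocations happen in
 * transaction context; letting reclaim re-enter the filesystem here could
 * deadlock.  btrfs_free_delayed_extent_op() accepts NULL, so error paths
 * can call it unconditionally.
 */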

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		case 0:
			/* head refs have no backref key type, so type == 0 */
			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}
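
/*
 * Reference counting sketch (illustrative): a caller that keeps a node
 * pointer beyond the delayed_refs lock takes its own count first and
 * drops it with the helper above:
 *
 *	atomic_inc(&ref->refs);
 *	...use the node...
 *	btrfs_put_delayed_ref(ref);
 */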

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);

static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
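
/*
 * Usage sketch for the lock/unlock pair above (hedged; the retry policy
 * is up to the caller): btrfs_delayed_ref_lock() returns nonzero when the
 * head could not be taken, in which case a caller typically moves on to
 * another head:
 *
 *	if (btrfs_delayed_ref_lock(trans, head))
 *		continue;
 *	...run the head's refs...
 *	btrfs_delayed_ref_unlock(head);
 */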

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

/*
 * delayed refs with a ref_seq > 0 must be held back during backref walking.
 * This only applies to items in one of the fs-trees.  for_cow items never
 * need to be held back, so they won't get a ref_seq number.
 */
static inline int need_ref_seq(int for_cow, u64 rootid)
{
	if (for_cow)
		return 0;

	if (rootid == BTRFS_FS_TREE_OBJECTID)
		return 1;

	if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	return 0;
}
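
/*
 * The (s64) casts above make the "virtual" tree ids, which are huge u64
 * values like (u64)-5, compare as negative and thus fail the
 * BTRFS_FIRST_FREE_OBJECTID test.  A few worked examples (values taken
 * from ctree.h: FS_TREE = 5, FIRST_FREE = 256, ROOT_TREE = 1):
 *
 *	need_ref_seq(1, anything)                 == 0  (for_cow)
 *	need_ref_seq(0, BTRFS_FS_TREE_OBJECTID)   == 1
 *	need_ref_seq(0, 256)                      == 1  (a subvolume)
 *	need_ref_seq(0, BTRFS_ROOT_TREE_OBJECTID) == 0  (1 < 256)
 */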

/*
 * a node might live in a head or a regular ref; this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
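
/*
 * Illustrative sketch tying the cast helpers together (not taken from the
 * .c files): dispatch on is_head and ->type before casting:
 *
 *	if (btrfs_delayed_ref_is_head(node)) {
 *		struct btrfs_delayed_ref_head *head;
 *		head = btrfs_delayed_node_to_head(node);
 *	} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
 *		   node->type == BTRFS_SHARED_DATA_REF_KEY) {
 *		struct btrfs_delayed_data_ref *dref;
 *		dref = btrfs_delayed_node_to_data_ref(node);
 *	} else {
 *		struct btrfs_delayed_tree_ref *tref;
 *		tref = btrfs_delayed_node_to_tree_ref(node);
 *	}
 */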
#endif