/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

struct btrfs_delayed_ref_node {
	struct rb_node rb_node;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.  For example,
	 * three queued adds and one queued drop of the same extent
	 * leave the head's ref_mod at +2.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	unsigned int is_head:1;
	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u64 flags_to_set;
	unsigned int update_key:1;
	unsigned int update_flags:1;
	unsigned int is_data:1;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	struct list_head cluster;

	struct btrfs_delayed_extent_op *extent_op;
	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-memory accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
};

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

struct btrfs_delayed_ref_root {
	struct rb_root root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	unsigned long num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	/*
	 * bumped when someone is making progress on the delayed
	 * refs, so that other procs know they are just adding to
	 * contention instead of helping
	 */
	atomic_t procs_running_refs;
	atomic_t ref_seq;
	wait_queue_head_t wait;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}
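
/*
 * Example (illustrative only, not part of the btrfs API): building a
 * delayed extent op that just rewrites the extent flags when the ref is
 * run.  btrfs_example_flags_op is a hypothetical helper name; real callers
 * open-code roughly this around btrfs_add_delayed_extent_op().
 */
static inline struct btrfs_delayed_extent_op *
btrfs_example_flags_op(u64 flags, int is_data)
{
	struct btrfs_delayed_extent_op *op;

	op = btrfs_alloc_delayed_extent_op();
	if (!op)
		return NULL;	/* GFP_NOFS allocation failed */

	/* the slab is not zeroed, so initialize every field */
	memset(&op->key, 0, sizeof(op->key));
	op->flags_to_set = flags;
	op->update_flags = 1;	/* apply flags_to_set when the ref runs */
	op->update_key = 0;	/* leave the disk key untouched */
	op->is_data = is_data ? 1 : 0;
	return op;
}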

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		case 0:
			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
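
/*
 * Example (illustrative only): queueing the drop of one tree block through
 * the declaration above.  btrfs_example_queue_tree_drop is a hypothetical
 * wrapper; a real caller (see extent-tree.c) also picks parent/ref_root
 * based on whether the block is shared.
 */
static inline int btrfs_example_queue_tree_drop(struct btrfs_fs_info *fs_info,
						struct btrfs_trans_handle *trans,
						u64 bytenr, u64 num_bytes,
						u64 parent, u64 ref_root,
						int level, int for_cow)
{
	return btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
					  parent, ref_root, level,
					  BTRFS_DROP_DELAYED_REF,
					  NULL /* no extent_op */, for_cow);
}
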
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
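
/*
 * Example (illustrative only): the typical lock/inspect/unlock pattern for
 * one extent's delayed refs.  btrfs_example_peek_ref_mod is a hypothetical
 * helper, and real callers (see extent-tree.c) hold delayed_refs->lock
 * around the lookup; that locking is elided here for brevity.
 */
static inline int btrfs_example_peek_ref_mod(struct btrfs_trans_handle *trans,
					     u64 bytenr, int *ref_mod)
{
	struct btrfs_delayed_ref_head *head;
	int ret;

	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		return -ENOENT;	/* nothing queued against this extent */

	ret = btrfs_delayed_ref_lock(trans, head);
	if (ret)
		return ret;	/* the head was run before we could lock it */

	*ref_mod = head->node.ref_mod;	/* net effect of all queued mods */
	btrfs_delayed_ref_unlock(head);
	return 0;
}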

int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 search_start);
void btrfs_release_ref_cluster(struct list_head *cluster);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

/*
 * delayed refs with a ref_seq > 0 must be held back during backref walking.
 * this only applies to items in one of the fs-trees. for_cow items never need
 * to be held back, so they won't get a ref_seq number.
 */
static inline int need_ref_seq(int for_cow, u64 rootid)
{
	if (for_cow)
		return 0;

	if (rootid == BTRFS_FS_TREE_OBJECTID)
		return 1;

	if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	return 0;
}
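
/*
 * Worked example (objectid values from ctree.h): with for_cow == 0,
 * BTRFS_FS_TREE_OBJECTID (5) and any subvolume id >=
 * BTRFS_FIRST_FREE_OBJECTID (256) return 1.  Pseudo roots such as
 * BTRFS_TREE_LOG_OBJECTID (-7) are huge as a u64 but negative as an s64,
 * so the (s64) comparison keeps log-tree items from being held back.
 */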

/*
 * a node might live in a head or a regular ref; this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
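
/*
 * Example (illustrative only, not part of the btrfs API): dispatching on a
 * node pulled out of the rbtree, using the same type values that
 * btrfs_put_delayed_ref() switches on.  btrfs_example_ref_root is a
 * hypothetical helper; head refs carry no root, so 0 stands in for "none".
 */
static inline u64 btrfs_example_ref_root(struct btrfs_delayed_ref_node *node)
{
	if (btrfs_delayed_ref_is_head(node))
		return 0;

	switch (node->type) {
	case BTRFS_TREE_BLOCK_REF_KEY:
	case BTRFS_SHARED_BLOCK_REF_KEY:
		return btrfs_delayed_node_to_tree_ref(node)->root;
	case BTRFS_EXTENT_DATA_REF_KEY:
	case BTRFS_SHARED_DATA_REF_KEY:
		return btrfs_delayed_node_to_data_ref(node)->root;
	default:
		return 0;
	}
}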
#endif