/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "ref-cache.h"
#include "transaction.h"

24struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents)
25{
26 struct btrfs_leaf_ref *ref;
27
28 ref = kmalloc(btrfs_leaf_ref_size(nr_extents), GFP_NOFS);
29 if (ref) {
30 memset(ref, 0, sizeof(*ref));
31 atomic_set(&ref->usage, 1);
32 }
33 return ref;
34}
35
36void btrfs_free_leaf_ref(struct btrfs_leaf_ref *ref)
37{
38 if (!ref)
39 return;
40 WARN_ON(atomic_read(&ref->usage) == 0);
41 if (atomic_dec_and_test(&ref->usage)) {
42 BUG_ON(ref->in_tree);
43 kfree(ref);
44 }
45}
46
47static int comp_keys(struct btrfs_key *k1, struct btrfs_key *k2)
48{
49 if (k1->objectid > k2->objectid)
50 return 1;
51 if (k1->objectid < k2->objectid)
52 return -1;
53 if (k1->type > k2->type)
54 return 1;
55 if (k1->type < k2->type)
56 return -1;
57 if (k1->offset > k2->offset)
58 return 1;
59 if (k1->offset < k2->offset)
60 return -1;
61 return 0;
62}
63
64static struct rb_node *tree_insert(struct rb_root *root, struct btrfs_key *key,
65 struct rb_node *node)
66{
67 struct rb_node ** p = &root->rb_node;
68 struct rb_node * parent = NULL;
69 struct btrfs_leaf_ref *entry;
70 int ret;
71
72 while(*p) {
73 parent = *p;
74 entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
75 WARN_ON(!entry->in_tree);
76
77 ret = comp_keys(key, &entry->key);
78 if (ret < 0)
79 p = &(*p)->rb_left;
80 else if (ret > 0)
81 p = &(*p)->rb_right;
82 else
83 return parent;
84 }
85
86 entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
87 entry->in_tree = 1;
88 rb_link_node(node, parent, p);
89 rb_insert_color(node, root);
90 return NULL;
91}
92
93static struct rb_node *tree_search(struct rb_root *root, struct btrfs_key *key)
94{
95 struct rb_node * n = root->rb_node;
96 struct btrfs_leaf_ref *entry;
97 int ret;
98
99 while(n) {
100 entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
101 WARN_ON(!entry->in_tree);
102
103 ret = comp_keys(key, &entry->key);
104 if (ret < 0)
105 n = n->rb_left;
106 else if (ret > 0)
107 n = n->rb_right;
108 else
109 return n;
110 }
111 return NULL;
112}
113
/*
 * Remove and drop every leaf ref cached for @root.  Returns 0.
 *
 * The tree spinlock is released around each btrfs_free_leaf_ref()
 * call (and a cond_resched()) so freeing never happens under the
 * lock; the loop re-checks emptiness after reacquiring it, which
 * keeps this correct if other threads add or remove refs in between.
 */
int btrfs_remove_leaf_refs(struct btrfs_root *root)
{
	struct rb_node *rb;
	struct btrfs_leaf_ref *ref = NULL;
	struct btrfs_leaf_ref_tree *tree = root->ref_tree;

	/* roots without a ref cache have nothing to do */
	if (!tree)
		return 0;

	spin_lock(&tree->lock);
	while(!btrfs_leaf_ref_tree_empty(tree)) {
		/* invalidate the one-entry lookup cache */
		tree->last = NULL;
		rb = rb_first(&tree->root);
		ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
		rb_erase(&ref->rb_node, &tree->root);
		ref->in_tree = 0;

		spin_unlock(&tree->lock);

		/* drops the tree's usage count; may free the ref */
		btrfs_free_leaf_ref(ref);

		cond_resched();
		spin_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
	return 0;
}
141
142struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
143 struct btrfs_key *key)
144{
145 struct rb_node *rb;
146 struct btrfs_leaf_ref *ref = NULL;
147 struct btrfs_leaf_ref_tree *tree = root->ref_tree;
148
149 if (!tree)
150 return NULL;
151
152 spin_lock(&tree->lock);
153 if (tree->last && comp_keys(key, &tree->last->key) == 0) {
154 ref = tree->last;
155 } else {
156 rb = tree_search(&tree->root, key);
157 if (rb) {
158 ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
159 tree->last = ref;
160 }
161 }
162 if (ref)
163 atomic_inc(&ref->usage);
164 spin_unlock(&tree->lock);
165 return ref;
166}
167
/*
 * Insert @ref into @root's leaf ref cache.
 *
 * On success the tree takes its own usage reference on @ref, the
 * ref's size is added to the fs-wide accounting (and to the running
 * total when the tree belongs to the current transaction), and @ref
 * becomes the cached most-recent entry.  Returns -EEXIST if an entry
 * with the same key is already in the tree, 0 otherwise.
 *
 * NOTE(review): running_transaction is read without holding any
 * transaction lock here — presumably safe in this call context, but
 * worth confirming against the callers.
 */
int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
	int ret = 0;
	struct rb_node *rb;
	size_t size = btrfs_leaf_ref_size(ref->nritems);
	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
	struct btrfs_transaction *trans = root->fs_info->running_transaction;

	spin_lock(&tree->lock);
	rb = tree_insert(&tree->root, &ref->key, &ref->rb_node);
	if (rb) {
		/* a ref with this key is already cached */
		ret = -EEXIST;
	} else {
		/* account the new entry; ref_cache_lock nests inside tree->lock */
		spin_lock(&root->fs_info->ref_cache_lock);
		root->fs_info->total_ref_cache_size += size;
		if (trans && tree->generation == trans->transid)
			root->fs_info->running_ref_cache_size += size;
		spin_unlock(&root->fs_info->ref_cache_lock);

		/* the tree now holds its own reference on @ref */
		tree->last = ref;
		atomic_inc(&ref->usage);
	}
	spin_unlock(&tree->lock);
	return ret;
}
193
/*
 * Remove @ref from @root's leaf ref cache and drop the tree's usage
 * reference on it.  Returns 0.
 *
 * The ref must currently be in the tree (BUG otherwise).  The size
 * accounting added by btrfs_add_leaf_ref() is reversed, and if @ref
 * is the cached most-recent entry, the cache is advanced to the next
 * in-order entry (or cleared).
 */
int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
	size_t size = btrfs_leaf_ref_size(ref->nritems);
	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
	struct btrfs_transaction *trans = root->fs_info->running_transaction;

	BUG_ON(!ref->in_tree);
	spin_lock(&tree->lock);

	/* undo the accounting done at insert time */
	spin_lock(&root->fs_info->ref_cache_lock);
	root->fs_info->total_ref_cache_size -= size;
	if (trans && tree->generation == trans->transid)
		root->fs_info->running_ref_cache_size -= size;
	spin_unlock(&root->fs_info->ref_cache_lock);

	/* keep the one-entry lookup cache from pointing at a removed ref */
	if (tree->last == ref) {
		struct rb_node *next = rb_next(&ref->rb_node);
		if (next) {
			tree->last = rb_entry(next, struct btrfs_leaf_ref,
					      rb_node);
		} else
			tree->last = NULL;
	}

	rb_erase(&ref->rb_node, &tree->root);
	ref->in_tree = 0;

	spin_unlock(&tree->lock);

	/* drop the reference the tree held; may free @ref */
	btrfs_free_leaf_ref(ref);
	return 0;
}
226