/*
 * Copyright (C) STRATO AG 2013. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/uuid.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "print-tree.h"

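/*
 * A UUID is mapped to a btrfs_key by splitting it into two little-endian
 * 64 bit halves: the first half becomes the objectid, the second half the
 * offset. The key type is either BTRFS_UUID_KEY_SUBVOL or
 * BTRFS_UUID_KEY_RECEIVED_SUBVOL, and the item payload is an array of
 * little-endian 64 bit subvolume ids that share this uuid/type pair.
 */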
static void btrfs_uuid_to_key(u8 *uuid, u8 type, struct btrfs_key *key)
{
	key->type = type;
	key->objectid = get_unaligned_le64(uuid);
	key->offset = get_unaligned_le64(uuid + sizeof(u64));
}

/* Return -ENOENT if not found, < 0 on error, or 0 if an item was found. */
static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, u8 *uuid,
				  u8 type, u64 subid)
{
	int ret;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	unsigned long offset;
	struct btrfs_key key;

	if (WARN_ON_ONCE(!uuid_root)) {
		ret = -ENOENT;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	btrfs_uuid_to_key(uuid, type, &key);
	ret = btrfs_search_slot(NULL, uuid_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	offset = btrfs_item_ptr_offset(eb, slot);
	ret = -ENOENT;

	if (!IS_ALIGNED(item_size, sizeof(u64))) {
		btrfs_warn(uuid_root->fs_info,
			   "uuid item with illegal size %lu!",
			   (unsigned long)item_size);
		goto out;
	}
	while (item_size) {
		__le64 data;

		read_extent_buffer(eb, &data, offset, sizeof(data));
		if (le64_to_cpu(data) == subid) {
			ret = 0;
			break;
		}
		offset += sizeof(data);
		item_size -= sizeof(data);
	}

out:
	btrfs_free_path(path);
	return ret;
}

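/*
 * Add a subid for the given uuid and type. If the (uuid, type, subid) tuple
 * is already present, nothing is changed and 0 is returned. Otherwise a new
 * item is inserted for the uuid/type key, or an existing item is extended
 * and the subid is appended at its end.
 */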
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
			u64 subid_cpu)
{
	int ret;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int slot;
	unsigned long offset;
	__le64 subid_le;

	ret = btrfs_uuid_tree_lookup(uuid_root, uuid, type, subid_cpu);
	if (ret != -ENOENT)
		return ret;

	if (WARN_ON_ONCE(!uuid_root)) {
		ret = -EINVAL;
		goto out;
	}

	btrfs_uuid_to_key(uuid, type, &key);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_insert_empty_item(trans, uuid_root, path, &key,
				      sizeof(subid_le));
	if (ret >= 0) {
		/* Add an item for this type for the first time. */
		eb = path->nodes[0];
		slot = path->slots[0];
		offset = btrfs_item_ptr_offset(eb, slot);
	} else if (ret == -EEXIST) {
		/*
		 * An item with that type already exists.
		 * Extend the item and store the new subid at the end.
		 */
		btrfs_extend_item(uuid_root, path, sizeof(subid_le));
		eb = path->nodes[0];
		slot = path->slots[0];
		offset = btrfs_item_ptr_offset(eb, slot);
		offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
	} else if (ret < 0) {
		btrfs_warn(uuid_root->fs_info,
			   "insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
			   ret, (unsigned long long)key.objectid,
			   (unsigned long long)key.offset, type);
		goto out;
	}

	ret = 0;
	subid_le = cpu_to_le64(subid_cpu);
	write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}

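/*
 * Remove a subid from the item that matches the given uuid and type. If the
 * subid is the only entry, the whole item is deleted; otherwise the remaining
 * subids are moved down and the item is truncated by one u64. Returns -ENOENT
 * if either the uuid item or the subid within it cannot be found.
 */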
int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
			u64 subid)
{
	int ret;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int slot;
	unsigned long offset;
	u32 item_size;
	unsigned long move_dst;
	unsigned long move_src;
	unsigned long move_len;

	if (WARN_ON_ONCE(!uuid_root)) {
		ret = -EINVAL;
		goto out;
	}

	btrfs_uuid_to_key(uuid, type, &key);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn(uuid_root->fs_info,
			   "error %d while searching for uuid item!", ret);
		goto out;
	}
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	offset = btrfs_item_ptr_offset(eb, slot);
	item_size = btrfs_item_size_nr(eb, slot);
	if (!IS_ALIGNED(item_size, sizeof(u64))) {
		btrfs_warn(uuid_root->fs_info,
			   "uuid item with illegal size %lu!",
			   (unsigned long)item_size);
		ret = -ENOENT;
		goto out;
	}
	while (item_size) {
		__le64 read_subid;

		read_extent_buffer(eb, &read_subid, offset, sizeof(read_subid));
		if (le64_to_cpu(read_subid) == subid)
			break;
		offset += sizeof(read_subid);
		item_size -= sizeof(read_subid);
	}

	if (!item_size) {
		ret = -ENOENT;
		goto out;
	}

	item_size = btrfs_item_size_nr(eb, slot);
	if (item_size == sizeof(subid)) {
		ret = btrfs_del_item(trans, uuid_root, path);
		goto out;
	}

	move_dst = offset;
	move_src = offset + sizeof(subid);
	move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
	memmove_extent_buffer(eb, move_dst, move_src, move_len);
	btrfs_truncate_item(uuid_root, path, item_size - sizeof(subid), 1);

out:
	btrfs_free_path(path);
	return ret;
}

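/*
 * Helper for btrfs_uuid_tree_iterate(): remove a single uuid tree entry in
 * its own short transaction.
 */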
static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
			       u64 subid)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/* 1 - for the uuid item */
	trans = btrfs_start_transaction(uuid_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_uuid_tree_rem(trans, uuid_root, uuid, type, subid);
	btrfs_end_transaction(trans, uuid_root);

out:
	return ret;
}

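/*
 * Walk all items in the uuid tree and call check_func() for every
 * (uuid, type, subid) tuple found. A return value > 0 from check_func()
 * marks the entry as stale and it is removed from the tree; a negative
 * return value aborts the iteration.
 */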
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
			    int (*check_func)(struct btrfs_fs_info *, u8 *, u8,
					      u64))
{
	struct btrfs_root *root = fs_info->uuid_root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	int slot;
	u32 item_size;
	unsigned long offset;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

again_search_slot:
	ret = btrfs_search_forward(root, &key, path, 0);
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto out;
	}

	while (1) {
		cond_resched();
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_UUID_KEY_SUBVOL &&
		    key.type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
			goto skip;

		offset = btrfs_item_ptr_offset(leaf, slot);
		item_size = btrfs_item_size_nr(leaf, slot);
		if (!IS_ALIGNED(item_size, sizeof(u64))) {
			btrfs_warn(fs_info,
				   "uuid item with illegal size %lu!",
				   (unsigned long)item_size);
			goto skip;
		}
		while (item_size) {
			u8 uuid[BTRFS_UUID_SIZE];
			__le64 subid_le;
			u64 subid_cpu;

			put_unaligned_le64(key.objectid, uuid);
			put_unaligned_le64(key.offset, uuid + sizeof(u64));
			read_extent_buffer(leaf, &subid_le, offset,
					   sizeof(subid_le));
			subid_cpu = le64_to_cpu(subid_le);
			ret = check_func(fs_info, uuid, key.type, subid_cpu);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_uuid_iter_rem(root, uuid, key.type,
							  subid_cpu);
				if (ret == 0) {
					/*
					 * This might look inefficient, but it
					 * is justified: check_func() returning
					 * 1 is the exception, and in the
					 * regular case only one entry per UUID
					 * exists.
					 */
					goto again_search_slot;
				}
				if (ret < 0 && ret != -ENOENT)
					goto out;
				key.offset++;
				goto again_search_slot;
			}
			item_size -= sizeof(subid_le);
			offset += sizeof(subid_le);
		}

skip:
		ret = btrfs_next_item(root, path);
		if (ret == 0)
			continue;
		else if (ret > 0)
			ret = 0;
		break;
	}

out:
	btrfs_free_path(path);
	return ret;
}