/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 */
	u64 old_refcnt;
	u64 new_refcnt;
};
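
/*
 * Worked example of the counters above (semantics assumed from the qgroup
 * design, not spelled out in this file): if subvolumes A and B both
 * reference the same 1 MiB extent, that extent counts towards rfer of both
 * qgroups but towards excl of neither; an extent referenced only by A
 * counts towards both A's rfer and A's excl.
 */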

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
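
/*
 * One btrfs_qgroup_list node is allocated per relation and linked into two
 * lists at once: into member->groups via next_group and into
 * parent->members via next_member (see add_relation_rb() below). E.g.
 * making 1/100 a parent of 0/257 allocates a single node reachable from
 * both qgroups.
 */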

#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}
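
/*
 * Note the comparison direction above: descending left when the node's key
 * is smaller than the one we are looking for is the mirror image of the
 * usual rbtree idiom, but it matches add_qgroup_rb() below, so lookups and
 * insertions agree on the ordering and the tree stays consistent.
 */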

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}
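
/*
 * Note the asymmetry with the on-disk format: btrfs_add_qgroup_relation()
 * below inserts two relation items (src->dst and dst->src) into the quota
 * tree, while the in-memory side keeps exactly one glue node per relation,
 * linked into both qgroups' lists.
 */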

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
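 *
 * It makes two passes over the quota tree: pass 1 reads the status item
 * and all qgroup info and limit items, pass 2 reads the relation items.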
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_enabled)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					"old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		   ret >= 0) {
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	}
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable().
 * The first two are single-threaded paths; for the third, quota_root has
 * already been set to NULL with qgroup_lock held, so it is safe to clean
 * up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() both when unmounting the
	 * filesystem and when disabling quota, so set qgroup_ulist to
	 * NULL here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_test_is_dummy_root(quota_root))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_test_is_dummy_root(root))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete the leaves one by one, since the whole tree
		 * is going to be deleted anyway.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}

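/*
 * Enabling quotas: create the quota tree and its status item, then scan
 * the root tree's ROOT_REF keys to create one qgroup item per existing
 * subvolume, plus the level-0 qgroup for BTRFS_FS_TREE_OBJECTID.
 */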
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check whether such a qgroup relation exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;
	int err;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check whether such a qgroup relation exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check if there are no relations to this qgroup */
		if (!list_empty(&qgroup->groups) ||
		    !list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	ret = del_qgroup_item(trans, quota_root, qgroupid);

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)
		qgroup->max_rfer = limit->max_rfer;
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
		qgroup->max_excl = limit->max_excl;
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)
		qgroup->rsv_rfer = limit->rsv_rfer;
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)
		qgroup->rsv_excl = limit->rsv_excl;
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, quota_root, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
			   struct btrfs_qgroup_operation *oper2)
{
	/*
	 * Ignore seq and type here, we're looking for any operation
	 * at all related to this extent on that root.
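	 * For example, an ADD_SHARED operation recorded at seq 10 and a
	 * SUB_SHARED operation recorded at seq 42 for the same
	 * bytenr/ref_root pair compare as equal here (the seq values are
	 * illustrative only).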
	 */
	if (oper1->bytenr < oper2->bytenr)
		return -1;
	if (oper1->bytenr > oper2->bytenr)
		return 1;
	if (oper1->ref_root < oper2->ref_root)
		return -1;
	if (oper1->ref_root > oper2->ref_root)
		return 1;
	return 0;
}

static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
{
	struct rb_node *n;
	struct btrfs_qgroup_operation *cur;
	int cmp;

	spin_lock(&fs_info->qgroup_op_lock);
	n = fs_info->qgroup_op_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct btrfs_qgroup_operation, n);
		cmp = comp_oper_exist(cur, oper);
		if (cmp < 0) {
			n = n->rb_right;
		} else if (cmp) {
			n = n->rb_left;
		} else {
			spin_unlock(&fs_info->qgroup_op_lock);
			return -EEXIST;
		}
	}
	spin_unlock(&fs_info->qgroup_op_lock);
	return 0;
}

static int comp_oper(struct btrfs_qgroup_operation *oper1,
		     struct btrfs_qgroup_operation *oper2)
{
	if (oper1->bytenr < oper2->bytenr)
		return -1;
	if (oper1->bytenr > oper2->bytenr)
		return 1;
	if (oper1->ref_root < oper2->ref_root)
		return -1;
	if (oper1->ref_root > oper2->ref_root)
		return 1;
	if (oper1->seq < oper2->seq)
		return -1;
	if (oper1->seq > oper2->seq)
		return 1;
	if (oper1->type < oper2->type)
		return -1;
	if (oper1->type > oper2->type)
		return 1;
	return 0;
}

static int insert_qgroup_oper(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup_operation *cur;
	int cmp;

	spin_lock(&fs_info->qgroup_op_lock);
	p = &fs_info->qgroup_op_tree.rb_node;
	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct btrfs_qgroup_operation, n);
		cmp = comp_oper(cur, oper);
		if (cmp < 0) {
			p = &(*p)->rb_right;
		} else if (cmp) {
			p = &(*p)->rb_left;
		} else {
			spin_unlock(&fs_info->qgroup_op_lock);
			return -EEXIST;
		}
	}
	rb_link_node(&oper->n, parent, p);
	rb_insert_color(&oper->n, &fs_info->qgroup_op_tree);
	spin_unlock(&fs_info->qgroup_op_lock);
	return 0;
}

/*
 * Record a quota operation for processing later on.
 * @trans: the transaction we are adding the delayed op to.
 * @fs_info: the fs_info for this fs.
 * @ref_root: the root of the reference we are acting on,
 * @bytenr: the bytenr we are acting on.
 * @num_bytes: the number of bytes in the reference.
 * @type: the type of operation this is.
 * @mod_seq: do we need to get a sequence number for looking up roots.
 *
 * We just add it to our trans qgroup_ref_list and carry on and process these
 * operations in order at some later point. If the reference root isn't a fs
 * root then we don't bother with doing anything.
 *
 * MUST BE HOLDING THE REF LOCK.
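 *
 * A minimal sketch of a call site (the surrounding context is assumed, not
 * taken from this file): recording that subvolume 5 gained the sole
 * reference to a newly allocated extent could look like
 *
 *	btrfs_qgroup_record_ref(trans, fs_info, 5, bytenr, num_bytes,
 *				BTRFS_QGROUP_OPER_ADD_EXCL, 0);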
 */
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info, u64 ref_root,
			    u64 bytenr, u64 num_bytes,
			    enum btrfs_qgroup_operation_type type, int mod_seq)
{
	struct btrfs_qgroup_operation *oper;
	int ret;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		return 0;

	oper = kmalloc(sizeof(*oper), GFP_NOFS);
	if (!oper)
		return -ENOMEM;

	oper->ref_root = ref_root;
	oper->bytenr = bytenr;
	oper->num_bytes = num_bytes;
	oper->type = type;
	oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
	INIT_LIST_HEAD(&oper->elem.list);
	oper->elem.seq = 0;

	trace_btrfs_qgroup_record_ref(oper);

	if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
		/*
		 * If any operation for this bytenr/ref_root combo
		 * exists, then we know it's not exclusively owned and
		 * shouldn't be queued up.
		 *
		 * This also catches the case where we have a cloned
		 * extent that gets queued up multiple times during
		 * drop snapshot.
		 */
		if (qgroup_oper_exists(fs_info, oper)) {
			kfree(oper);
			return 0;
		}
	}

	ret = insert_qgroup_oper(fs_info, oper);
	if (ret) {
		/* Shouldn't happen so have an assert for developers */
		ASSERT(0);
		kfree(oper);
		return ret;
	}
	list_add_tail(&oper->list, &trans->qgroup_ref_list);

	if (mod_seq)
		btrfs_get_tree_mod_seq(fs_info, &oper->elem);

	return 0;
}

/*
 * The easy accounting: if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 */
static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_qgroup_operation *oper)
{
	struct btrfs_qgroup *qgroup;
	struct ulist *tmp;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int sign = 0;
	int ret = 0;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
		goto out;
	qgroup = find_qgroup_rb(fs_info, oper->ref_root);
	if (!qgroup)
		goto out;
	switch (oper->type) {
	case BTRFS_QGROUP_OPER_ADD_EXCL:
		sign = 1;
		break;
	case BTRFS_QGROUP_OPER_SUB_EXCL:
		sign = -1;
		break;
	default:
		ASSERT(0);
	}
	qgroup->rfer += sign * oper->num_bytes;
	qgroup->rfer_cmpr += sign * oper->num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
	qgroup->excl += sign * oper->num_bytes;
	qgroup->excl_cmpr += sign * oper->num_bytes;

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = u64_to_ptr(unode->aux);
		qgroup->rfer += sign * oper->num_bytes;
		qgroup->rfer_cmpr += sign * oper->num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
		qgroup->excl += sign * oper->num_bytes;
		qgroup->excl_cmpr += sign * oper->num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(tmp);
	return ret;
}

/*
 * Walk all of the roots that pointed to our bytenr and adjust their refcnts
 * properly.
 */
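/*
 * Worked example of the seq-based counting below (numbers illustrative
 * only): with seq = 100 and three roots referencing the extent, all
 * members of one parent qgroup, the non-rescan path leaves that qgroup
 * with old_refcnt = 103, i.e. an old count of old_refcnt - seq = 3,
 * without ever having to zero the field between accounting rounds.
 */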
static int qgroup_calc_old_refcnt(struct btrfs_fs_info *fs_info,
				  u64 root_to_skip, struct ulist *tmp,
				  struct ulist *roots, struct ulist *qgroups,
				  u64 seq, int *old_roots, int rescan)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		/* We don't count our current root here */
		if (unode->val == root_to_skip)
			continue;
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;
		/*
		 * We could have a pending removal of this same ref so we may
		 * not have actually found our ref root when doing
		 * btrfs_find_all_roots, so we need to keep track of how many
		 * old roots we find in case we removed ours and added a
		 * different one at the same time.  I don't think this could
		 * happen in practice but that sort of thinking leads to pain
		 * and suffering and to the dark side.
		 */
		(*old_roots)++;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = u64_to_ptr(tmp_unode->aux);
			/*
			 * We use this sequence number to keep from having to
			 * run the whole list and zero out the refcnt every
			 * time.  We basically use the sequence as the known 0
			 * count and then add 1 every time we see a qgroup.
			 * This is how we get how many of the roots actually
			 * point up to the upper level qgroups in order to
			 * determine exclusive counts.
			 *
			 * For rescan we want to set old_refcnt to seq so our
			 * exclusive calculations end up correct.
			 */
			if (rescan)
				qg->old_refcnt = seq;
			else if (qg->old_refcnt < seq)
				qg->old_refcnt = seq + 1;
			else
				qg->old_refcnt++;

			if (qg->new_refcnt < seq)
				qg->new_refcnt = seq + 1;
			else
				qg->new_refcnt++;
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}
1544
Josef Bacikfcebe452014-05-13 17:30:47 -07001545/*
1546 * We need to walk forward in our operation tree and account for any roots that
1547 * were deleted after we made this operation.
1548 */
1549static int qgroup_account_deleted_refs(struct btrfs_fs_info *fs_info,
1550 struct btrfs_qgroup_operation *oper,
1551 struct ulist *tmp,
1552 struct ulist *qgroups, u64 seq,
1553 int *old_roots)
Jan Schmidt46b665c2013-04-25 16:04:50 +00001554{
1555 struct ulist_node *unode;
1556 struct ulist_iterator uiter;
1557 struct btrfs_qgroup *qg;
Josef Bacikfcebe452014-05-13 17:30:47 -07001558 struct btrfs_qgroup_operation *tmp_oper;
1559 struct rb_node *n;
Jan Schmidt46b665c2013-04-25 16:04:50 +00001560 int ret;
1561
1562 ulist_reinit(tmp);
Jan Schmidt46b665c2013-04-25 16:04:50 +00001563
Josef Bacikfcebe452014-05-13 17:30:47 -07001564 /*
1565 * We only walk forward in the tree since we're only interested in
1566 * removals that happened _after_ our operation.
1567 */
1568 spin_lock(&fs_info->qgroup_op_lock);
1569 n = rb_next(&oper->n);
1570 spin_unlock(&fs_info->qgroup_op_lock);
1571 if (!n)
1572 return 0;
1573 tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
1574 while (tmp_oper->bytenr == oper->bytenr) {
1575 /*
1576 * If it's not a removal we don't care, additions work out
1577 * properly with our refcnt tracking.
1578 */
1579 if (tmp_oper->type != BTRFS_QGROUP_OPER_SUB_SHARED &&
1580 tmp_oper->type != BTRFS_QGROUP_OPER_SUB_EXCL)
1581 goto next;
1582 qg = find_qgroup_rb(fs_info, tmp_oper->ref_root);
1583 if (!qg)
1584 goto next;
1585 ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
1586 GFP_ATOMIC);
1587 if (ret) {
1588 if (ret < 0)
1589 return ret;
1590 /*
1591 * We only want to increase old_roots if this qgroup is
1592 * not already in the list of qgroups. If it is already
1593 * there then that means it must have been re-added or
1594 * the delete will be discarded because we had an
1595 * existing ref that we haven't looked up yet. In this
1596 * case we don't want to increase old_roots. So if ret
1597 * == 1 then we know that this is the first time we've
1598 * seen this qgroup and we can bump the old_roots.
1599 */
1600 (*old_roots)++;
1601 ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg),
1602 GFP_ATOMIC);
1603 if (ret < 0)
1604 return ret;
1605 }
1606next:
1607 spin_lock(&fs_info->qgroup_op_lock);
1608 n = rb_next(&tmp_oper->n);
1609 spin_unlock(&fs_info->qgroup_op_lock);
1610 if (!n)
1611 break;
1612 tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
1613 }
1614
1615 /* Ok now process the qgroups we found */
Jan Schmidt46b665c2013-04-25 16:04:50 +00001616 ULIST_ITER_INIT(&uiter);
1617 while ((unode = ulist_next(tmp, &uiter))) {
Josef Bacikfcebe452014-05-13 17:30:47 -07001618 struct btrfs_qgroup_list *glist;
Jan Schmidt46b665c2013-04-25 16:04:50 +00001619
Josef Bacikfcebe452014-05-13 17:30:47 -07001620 qg = u64_to_ptr(unode->aux);
1621 if (qg->old_refcnt < seq)
1622 qg->old_refcnt = seq + 1;
1623 else
1624 qg->old_refcnt++;
1625 if (qg->new_refcnt < seq)
1626 qg->new_refcnt = seq + 1;
1627 else
1628 qg->new_refcnt++;
Jan Schmidt46b665c2013-04-25 16:04:50 +00001629 list_for_each_entry(glist, &qg->groups, next_group) {
Josef Bacikfcebe452014-05-13 17:30:47 -07001630 ret = ulist_add(qgroups, glist->group->qgroupid,
1631 ptr_to_u64(glist->group), GFP_ATOMIC);
1632 if (ret < 0)
1633 return ret;
Jan Schmidt46b665c2013-04-25 16:04:50 +00001634 ret = ulist_add(tmp, glist->group->qgroupid,
Josef Bacikfcebe452014-05-13 17:30:47 -07001635 ptr_to_u64(glist->group), GFP_ATOMIC);
Jan Schmidt46b665c2013-04-25 16:04:50 +00001636 if (ret < 0)
1637 return ret;
1638 }
1639 }
Jan Schmidt46b665c2013-04-25 16:04:50 +00001640 return 0;
1641}
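
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the old_refcnt/new_refcnt updates above follow a "lazy reset" idiom. Any
 * count at or below the current base seq reads as zero; the first bump in a
 * pass jumps it to seq + 1 and later bumps increment it normally, so we never
 * have to walk every qgroup just to reset counters. The pattern, factored out:
 */
static inline void qgroup_refcnt_bump_sketch(u64 *refcnt, u64 seq)
{
	if (*refcnt < seq)
		*refcnt = seq + 1;	/* first reference seen in this pass */
	else
		(*refcnt)++;		/* already counted in this pass */
}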
1642
Josef Bacikfcebe452014-05-13 17:30:47 -07001643/* Add refcnt for the newly added reference. */
1644static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
1645 struct btrfs_qgroup_operation *oper,
1646 struct btrfs_qgroup *qgroup,
1647 struct ulist *tmp, struct ulist *qgroups,
1648 u64 seq)
Jan Schmidt46b665c2013-04-25 16:04:50 +00001649{
1650 struct ulist_node *unode;
1651 struct ulist_iterator uiter;
1652 struct btrfs_qgroup *qg;
Jan Schmidt46b665c2013-04-25 16:04:50 +00001653 int ret;
1654
Josef Bacikfcebe452014-05-13 17:30:47 -07001655 ulist_reinit(tmp);
1656 ret = ulist_add(qgroups, qgroup->qgroupid, ptr_to_u64(qgroup),
1657 GFP_ATOMIC);
1658 if (ret < 0)
1659 return ret;
1660 ret = ulist_add(tmp, qgroup->qgroupid, ptr_to_u64(qgroup),
1661 GFP_ATOMIC);
1662 if (ret < 0)
1663 return ret;
1664 ULIST_ITER_INIT(&uiter);
1665 while ((unode = ulist_next(tmp, &uiter))) {
1666 struct btrfs_qgroup_list *glist;
1667
1668 qg = u64_to_ptr(unode->aux);
1669 if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
1670 if (qg->new_refcnt < seq)
1671 qg->new_refcnt = seq + 1;
1672 else
1673 qg->new_refcnt++;
1674 } else {
1675 if (qg->old_refcnt < seq)
1676 qg->old_refcnt = seq + 1;
1677 else
1678 qg->old_refcnt++;
1679 }
1680 list_for_each_entry(glist, &qg->groups, next_group) {
1681 ret = ulist_add(tmp, glist->group->qgroupid,
1682 ptr_to_u64(glist->group), GFP_ATOMIC);
1683 if (ret < 0)
1684 return ret;
1685 ret = ulist_add(qgroups, glist->group->qgroupid,
1686 ptr_to_u64(glist->group), GFP_ATOMIC);
1687 if (ret < 0)
1688 return ret;
1689 }
1690 }
1691 return 0;
1692}
1693
1694/*
1695 * This adjusts the counters for all referenced qgroups if need be.
1696 */
1697static int qgroup_adjust_counters(struct btrfs_fs_info *fs_info,
1698 u64 root_to_skip, u64 num_bytes,
1699 struct ulist *qgroups, u64 seq,
1700 int old_roots, int new_roots, int rescan)
1701{
1702 struct ulist_node *unode;
1703 struct ulist_iterator uiter;
1704 struct btrfs_qgroup *qg;
1705 u64 cur_new_count, cur_old_count;
1706
1707 ULIST_ITER_INIT(&uiter);
1708 while ((unode = ulist_next(qgroups, &uiter))) {
1709 bool dirty = false;
1710
1711 qg = u64_to_ptr(unode->aux);
1712 /*
1713		 * Wasn't referenced before but is now; add to the reference
1714 * counters.
1715 */
1716 if (qg->old_refcnt <= seq && qg->new_refcnt > seq) {
1717 qg->rfer += num_bytes;
1718 qg->rfer_cmpr += num_bytes;
1719 dirty = true;
1720 }
1721
1722 /*
1723		 * Was referenced before but isn't now; subtract from the
1724 * reference counters.
1725 */
1726 if (qg->old_refcnt > seq && qg->new_refcnt <= seq) {
1727 qg->rfer -= num_bytes;
1728 qg->rfer_cmpr -= num_bytes;
1729 dirty = true;
1730 }
1731
1732 if (qg->old_refcnt < seq)
1733 cur_old_count = 0;
1734 else
1735 cur_old_count = qg->old_refcnt - seq;
1736 if (qg->new_refcnt < seq)
1737 cur_new_count = 0;
1738 else
1739 cur_new_count = qg->new_refcnt - seq;
1740
1741 /*
1742 * If our refcount was the same as the roots previously but our
1743 * new count isn't the same as the number of roots now then we
1744		 * went from having an exclusive reference on this range to not.
1745 */
1746 if (old_roots && cur_old_count == old_roots &&
1747 (cur_new_count != new_roots || new_roots == 0)) {
1748 WARN_ON(cur_new_count != new_roots && new_roots == 0);
1749 qg->excl -= num_bytes;
1750 qg->excl_cmpr -= num_bytes;
1751 dirty = true;
1752 }
1753
1754 /*
1755 * If we didn't reference all the roots before but now we do we
1756 * have an exclusive reference to this range.
1757 */
1758 if ((!old_roots || (old_roots && cur_old_count != old_roots))
1759 && cur_new_count == new_roots) {
1760 qg->excl += num_bytes;
1761 qg->excl_cmpr += num_bytes;
1762 dirty = true;
1763 }
1764
1765 if (dirty)
1766 qgroup_dirty(fs_info, qg);
1767 }
1768 return 0;
1769}
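
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the cur_old_count/cur_new_count computations above reduce a raw refcnt to
 * its per-pass value, i.e. its distance from the base seq, clamped at zero:
 */
static inline u64 qgroup_cur_count_sketch(u64 refcnt, u64 seq)
{
	return refcnt < seq ? 0 : refcnt - seq;
}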
1770
1771/*
1772 * If we removed a data extent and there were other references for that bytenr
1773 * then we need to look up all roots that reference it to check whether our
1774 * root still does. If it does, we can simply discard this operation.
1775 */
1776static int check_existing_refs(struct btrfs_trans_handle *trans,
1777 struct btrfs_fs_info *fs_info,
1778 struct btrfs_qgroup_operation *oper)
1779{
1780 struct ulist *roots = NULL;
1781 struct ulist_node *unode;
1782 struct ulist_iterator uiter;
1783 int ret = 0;
1784
1785 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
1786 oper->elem.seq, &roots);
1787 if (ret < 0)
1788 return ret;
1789 ret = 0;
1790
Jan Schmidt46b665c2013-04-25 16:04:50 +00001791 ULIST_ITER_INIT(&uiter);
1792 while ((unode = ulist_next(roots, &uiter))) {
Josef Bacikfcebe452014-05-13 17:30:47 -07001793 if (unode->val == oper->ref_root) {
1794 ret = 1;
1795 break;
Jan Schmidt46b665c2013-04-25 16:04:50 +00001796 }
1797 }
Josef Bacikfcebe452014-05-13 17:30:47 -07001798 ulist_free(roots);
1799 btrfs_put_tree_mod_seq(fs_info, &oper->elem);
Jan Schmidt46b665c2013-04-25 16:04:50 +00001800
Josef Bacikfcebe452014-05-13 17:30:47 -07001801 return ret;
1802}
1803
1804/*
1805 * If we share a reference across multiple roots then we may need to adjust
1806 * various qgroups' referenced and exclusive counters. The basic premise is this:
1807 *
1808 * 1) We have seq to represent a 0 count. Instead of looping through all of the
1809 * qgroups and resetting their refcount to 0 we just constantly bump this
1810 * sequence number to act as the base reference count. This means that any
1811 * refcnt equal to or below this sequence was never referenced. We
1812 * jack this sequence up by the number of roots we found each time in order to
1813 * make sure we don't have any overlap.
1814 *
1815 * 2) We first search all the roots that reference the area _except_ the root
1816 * we're acting on currently. This makes up the old_refcnt of all the
1817 * qgroups, i.e. their state before this operation.
1818 *
1819 * 3) We walk all of the qgroups referenced by the root we are currently acting
1820 * on, and will either adjust old_refcnt in the case of a removal or the
1821 * new_refcnt in the case of an addition.
1822 *
1823 * 4) Finally we walk all the qgroups that are referenced by this range
1824 * including the root we are acting on currently. We will adjust the counters
1825 * based on the number of roots we had and will have after this operation.
1826 *
1827 * Take this example as an illustration
1828 *
1829 * [qgroup 1/0]
1830 * / | \
1831 * [qg 0/0] [qg 0/1] [qg 0/2]
1832 * \ | /
1833 * [ extent ]
1834 *
1835 * Say we are adding a reference that is covered by qg 0/0. The first step
1836 * would give a refcnt of 1 to qg 0/1 and 0/2 and a refcnt of 2 to qg 1/0 with
1837 * old_roots being 2. Because we are adding a root, new_roots will be 3. We then
1838 * through qg 0/0 which will get the new_refcnt set to 1 and add 1 to qg 1/0's
1839 * new_refcnt, bringing it to 3. We then walk through all of the qgroups and
1840 * notice that the old refcnt for qg 0/0 < the new refcnt, so we added a
1841 * reference and thus must add the size to the referenced bytes. Everything
1842 * else is the same so nothing else changes.
1843 */
1844static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
1845 struct btrfs_fs_info *fs_info,
1846 struct btrfs_qgroup_operation *oper)
1847{
1848 struct ulist *roots = NULL;
1849 struct ulist *qgroups, *tmp;
1850 struct btrfs_qgroup *qgroup;
David Sterba3284da72015-02-25 15:47:32 +01001851 struct seq_list elem = SEQ_LIST_INIT(elem);
Josef Bacikfcebe452014-05-13 17:30:47 -07001852 u64 seq;
1853 int old_roots = 0;
1854 int new_roots = 0;
1855 int ret = 0;
1856
1857 if (oper->elem.seq) {
1858 ret = check_existing_refs(trans, fs_info, oper);
1859 if (ret < 0)
1860 return ret;
1861 if (ret)
1862 return 0;
1863 }
1864
1865 qgroups = ulist_alloc(GFP_NOFS);
1866 if (!qgroups)
1867 return -ENOMEM;
1868
1869 tmp = ulist_alloc(GFP_NOFS);
Eric Sandeend7372782014-06-12 00:14:59 -05001870 if (!tmp) {
1871 ulist_free(qgroups);
Josef Bacikfcebe452014-05-13 17:30:47 -07001872 return -ENOMEM;
Eric Sandeend7372782014-06-12 00:14:59 -05001873 }
Josef Bacikfcebe452014-05-13 17:30:47 -07001874
1875 btrfs_get_tree_mod_seq(fs_info, &elem);
1876 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
1877 &roots);
1878 btrfs_put_tree_mod_seq(fs_info, &elem);
1879 if (ret < 0) {
1880 ulist_free(qgroups);
1881 ulist_free(tmp);
1882 return ret;
1883 }
1884 spin_lock(&fs_info->qgroup_lock);
1885 qgroup = find_qgroup_rb(fs_info, oper->ref_root);
1886 if (!qgroup)
1887 goto out;
1888 seq = fs_info->qgroup_seq;
1889
1890 /*
1891 * So roots is the list of all the roots currently pointing at the
1892 * bytenr, including the ref we are adding if we are adding, or not if
1893 * we are removing a ref. So we pass in the ref_root to skip that root
1894	 * in our calculations. We set old_refcnt and new_refcnt because who the
1895 * hell knows what everything looked like before, and it doesn't matter
1896 * except...
1897 */
1898 ret = qgroup_calc_old_refcnt(fs_info, oper->ref_root, tmp, roots, qgroups,
1899 seq, &old_roots, 0);
1900 if (ret < 0)
1901 goto out;
1902
1903 /*
1904 * Now adjust the refcounts of the qgroups that care about this
1905 * reference, either the old_count in the case of removal or new_count
1906 * in the case of an addition.
1907 */
1908 ret = qgroup_calc_new_refcnt(fs_info, oper, qgroup, tmp, qgroups,
1909 seq);
1910 if (ret < 0)
1911 goto out;
1912
1913 /*
1914 * ...in the case of removals. If we had a removal before we got around
1915 * to processing this operation then we need to find that guy and count
1916 * his references as if they really existed so we don't end up screwing
1917 * up the exclusive counts. Then whenever we go to process the delete
1918 * everything will be grand and we can account for whatever exclusive
1919 * changes need to be made there. We also have to pass in old_roots so
1920 * we have an accurate count of the roots as it pertains to this
1921	 * operation's view of the world.
1922 */
1923 ret = qgroup_account_deleted_refs(fs_info, oper, tmp, qgroups, seq,
1924 &old_roots);
1925 if (ret < 0)
1926 goto out;
1927
1928 /*
1929	 * If we are adding our root, adjust the new number of roots up by one.
1930	 * Otherwise we are removing it, so count our root in old_roots instead.
1931 */
1932 if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
1933 new_roots = old_roots + 1;
1934 } else {
1935 new_roots = old_roots;
1936 old_roots++;
1937 }
1938 fs_info->qgroup_seq += old_roots + 1;
1939
1940
1941 /*
1942 * And now the magic happens, bless Arne for having a pretty elegant
1943 * solution for this.
1944 */
1945 qgroup_adjust_counters(fs_info, oper->ref_root, oper->num_bytes,
1946 qgroups, seq, old_roots, new_roots, 0);
1947out:
1948 spin_unlock(&fs_info->qgroup_lock);
1949 ulist_free(qgroups);
1950 ulist_free(roots);
1951 ulist_free(tmp);
1952 return ret;
Jan Schmidt46b665c2013-04-25 16:04:50 +00001953}
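
/*
 * Worked example for the root math above (illustrative numbers only, not
 * from the original code): suppose oper is an ADD_SHARED and
 * btrfs_find_all_roots() returned three roots including oper->ref_root.
 * qgroup_calc_old_refcnt() skips ref_root, so old_roots == 2, and because we
 * are adding, new_roots == old_roots + 1 == 3. qgroup_seq then advances by
 * old_roots + 1, past the largest refcnt any qgroup can have reached in this
 * pass, so stale counts read as zero in the next pass.
 */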
1954
Arne Jansenbed92ea2012-06-28 18:03:02 +02001955/*
Mark Fasheh11526512014-07-17 12:39:01 -07001956 * Process a reference to a shared subtree. This type of operation is
1957 * queued during snapshot removal when we encounter extents which are
1958 * shared between more than one root.
1959 */
1960static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
1961 struct btrfs_fs_info *fs_info,
1962 struct btrfs_qgroup_operation *oper)
1963{
1964 struct ulist *roots = NULL;
1965 struct ulist_node *unode;
1966 struct ulist_iterator uiter;
1967 struct btrfs_qgroup_list *glist;
1968 struct ulist *parents;
1969 int ret = 0;
Mark Fashehf90e5792014-07-17 12:39:04 -07001970 int err;
Mark Fasheh11526512014-07-17 12:39:01 -07001971 struct btrfs_qgroup *qg;
1972 u64 root_obj = 0;
David Sterba3284da72015-02-25 15:47:32 +01001973 struct seq_list elem = SEQ_LIST_INIT(elem);
Mark Fasheh11526512014-07-17 12:39:01 -07001974
1975 parents = ulist_alloc(GFP_NOFS);
1976 if (!parents)
1977 return -ENOMEM;
1978
1979 btrfs_get_tree_mod_seq(fs_info, &elem);
1980 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
1981 elem.seq, &roots);
1982 btrfs_put_tree_mod_seq(fs_info, &elem);
1983 if (ret < 0)
Eric Sandeena3c10892014-08-17 15:09:21 -05001984 goto out;
Mark Fasheh11526512014-07-17 12:39:01 -07001985
1986 if (roots->nnodes != 1)
1987 goto out;
1988
1989 ULIST_ITER_INIT(&uiter);
1990 unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
1991 /*
1992 * If we find our ref root then that means all refs
1993 * this extent has to the root have not yet been
1994 * deleted. In that case, we do nothing and let the
1995 * last ref for this bytenr drive our update.
1996 *
1997 * This can happen for example if an extent is
1998 * referenced multiple times in a snapshot (clone,
1999 * etc). If we are in the middle of snapshot removal,
2000 * queued updates for such an extent will find the
2001 * root if we have not yet finished removing the
2002 * snapshot.
2003 */
2004 if (unode->val == oper->ref_root)
2005 goto out;
2006
2007 root_obj = unode->val;
2008 BUG_ON(!root_obj);
2009
2010 spin_lock(&fs_info->qgroup_lock);
2011 qg = find_qgroup_rb(fs_info, root_obj);
2012 if (!qg)
2013 goto out_unlock;
2014
2015 qg->excl += oper->num_bytes;
2016 qg->excl_cmpr += oper->num_bytes;
2017 qgroup_dirty(fs_info, qg);
2018
2019 /*
2020 * Adjust counts for parent groups. First we find all
2021 * parents, then in the 2nd loop we do the adjustment
2022 * while adding parents of the parents to our ulist.
2023 */
2024 list_for_each_entry(glist, &qg->groups, next_group) {
Mark Fashehf90e5792014-07-17 12:39:04 -07002025 err = ulist_add(parents, glist->group->qgroupid,
Mark Fasheh11526512014-07-17 12:39:01 -07002026 ptr_to_u64(glist->group), GFP_ATOMIC);
Mark Fashehf90e5792014-07-17 12:39:04 -07002027 if (err < 0) {
2028 ret = err;
Mark Fasheh11526512014-07-17 12:39:01 -07002029 goto out_unlock;
Mark Fashehf90e5792014-07-17 12:39:04 -07002030 }
Mark Fasheh11526512014-07-17 12:39:01 -07002031 }
2032
2033 ULIST_ITER_INIT(&uiter);
2034 while ((unode = ulist_next(parents, &uiter))) {
2035 qg = u64_to_ptr(unode->aux);
2036 qg->excl += oper->num_bytes;
2037 qg->excl_cmpr += oper->num_bytes;
2038 qgroup_dirty(fs_info, qg);
2039
2040 /* Add any parents of the parents */
2041 list_for_each_entry(glist, &qg->groups, next_group) {
Mark Fashehf90e5792014-07-17 12:39:04 -07002042 err = ulist_add(parents, glist->group->qgroupid,
Mark Fasheh11526512014-07-17 12:39:01 -07002043 ptr_to_u64(glist->group), GFP_ATOMIC);
Mark Fashehf90e5792014-07-17 12:39:04 -07002044 if (err < 0) {
2045 ret = err;
Mark Fasheh11526512014-07-17 12:39:01 -07002046 goto out_unlock;
Mark Fashehf90e5792014-07-17 12:39:04 -07002047 }
Mark Fasheh11526512014-07-17 12:39:01 -07002048 }
2049 }
2050
2051out_unlock:
2052 spin_unlock(&fs_info->qgroup_lock);
2053
2054out:
2055 ulist_free(roots);
2056 ulist_free(parents);
2057 return ret;
2058}
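
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the parent walks in this file all share one shape. Seed a ulist with the
 * starting qgroup, then iterate it while appending each visited group's
 * parents; since ulist_add() ignores duplicates, the list doubles as both
 * work queue and visited set:
 */
static int qgroup_walk_parents_sketch(struct btrfs_qgroup *qgroup,
				      struct ulist *visit)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup_list *glist;
	struct btrfs_qgroup *qg;
	int ret;

	ret = ulist_add(visit, qgroup->qgroupid, ptr_to_u64(qgroup),
			GFP_ATOMIC);
	if (ret < 0)
		return ret;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(visit, &uiter))) {
		qg = u64_to_ptr(unode->aux);
		/* per-qgroup work (counter updates etc.) would go here */
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(visit, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}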
2059
2060/*
Arne Jansenbed92ea2012-06-28 18:03:02 +02002061 * btrfs_qgroup_account is called for every ref that is added to or deleted
2062 * from the fs. First, all roots referencing the extent are searched, and
2063 * then the space is accounted to the different roots accordingly. The
2064 * switch below dispatches each operation type to its accounting helper.
2065 */
Josef Bacikfcebe452014-05-13 17:30:47 -07002066static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
2067 struct btrfs_fs_info *fs_info,
2068 struct btrfs_qgroup_operation *oper)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002069{
Arne Jansenbed92ea2012-06-28 18:03:02 +02002070 int ret = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002071
2072 if (!fs_info->quota_enabled)
2073 return 0;
2074
2075 BUG_ON(!fs_info->quota_root);
2076
Jan Schmidt2f232032013-04-25 16:04:51 +00002077 mutex_lock(&fs_info->qgroup_rescan_lock);
2078 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
Josef Bacikfcebe452014-05-13 17:30:47 -07002079 if (fs_info->qgroup_rescan_progress.objectid <= oper->bytenr) {
Jan Schmidt2f232032013-04-25 16:04:51 +00002080 mutex_unlock(&fs_info->qgroup_rescan_lock);
2081 return 0;
2082 }
2083 }
2084 mutex_unlock(&fs_info->qgroup_rescan_lock);
2085
Josef Bacikfcebe452014-05-13 17:30:47 -07002086 ASSERT(is_fstree(oper->ref_root));
Arne Jansenbed92ea2012-06-28 18:03:02 +02002087
Mark Fashehd3982102014-07-17 12:39:00 -07002088 trace_btrfs_qgroup_account(oper);
2089
Josef Bacikfcebe452014-05-13 17:30:47 -07002090 switch (oper->type) {
2091 case BTRFS_QGROUP_OPER_ADD_EXCL:
2092 case BTRFS_QGROUP_OPER_SUB_EXCL:
2093 ret = qgroup_excl_accounting(fs_info, oper);
2094 break;
2095 case BTRFS_QGROUP_OPER_ADD_SHARED:
2096 case BTRFS_QGROUP_OPER_SUB_SHARED:
2097 ret = qgroup_shared_accounting(trans, fs_info, oper);
2098 break;
Mark Fasheh11526512014-07-17 12:39:01 -07002099 case BTRFS_QGROUP_OPER_SUB_SUBTREE:
2100 ret = qgroup_subtree_accounting(trans, fs_info, oper);
2101 break;
Josef Bacikfcebe452014-05-13 17:30:47 -07002102 default:
2103 ASSERT(0);
2104 }
2105 return ret;
2106}
Jan Schmidt2f232032013-04-25 16:04:51 +00002107
Josef Bacikfcebe452014-05-13 17:30:47 -07002108/*
2109 * Needs to be called every time we run delayed refs, even if there is an
2110 * error, in order to clean up outstanding operations.
2111 */
2112int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
2113 struct btrfs_fs_info *fs_info)
2114{
2115 struct btrfs_qgroup_operation *oper;
2116 int ret = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002117
Josef Bacikfcebe452014-05-13 17:30:47 -07002118 while (!list_empty(&trans->qgroup_ref_list)) {
2119 oper = list_first_entry(&trans->qgroup_ref_list,
2120 struct btrfs_qgroup_operation, list);
2121 list_del_init(&oper->list);
2122		if (!ret && !trans->aborted)
2123 ret = btrfs_qgroup_account(trans, fs_info, oper);
2124 spin_lock(&fs_info->qgroup_op_lock);
2125 rb_erase(&oper->n, &fs_info->qgroup_op_tree);
2126 spin_unlock(&fs_info->qgroup_op_lock);
2127 btrfs_put_tree_mod_seq(fs_info, &oper->elem);
2128 kfree(oper);
2129 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02002130 return ret;
2131}
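
/*
 * Usage sketch (hypothetical caller, not part of the original code): a path
 * that runs delayed refs is expected to drain the per-transaction list even
 * when an earlier step failed, so queued operations are not leaked:
 *
 *	ret = btrfs_run_delayed_refs(trans, root, count);
 *	err = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
 *	if (err && !ret)
 *		ret = err;
 */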
2132
2133/*
2134 * called from commit_transaction. Writes all changed qgroups to disk.
2135 */
2136int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
2137 struct btrfs_fs_info *fs_info)
2138{
2139 struct btrfs_root *quota_root = fs_info->quota_root;
2140 int ret = 0;
Jan Schmidt3d7b5a22013-04-25 16:04:52 +00002141 int start_rescan_worker = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002142
2143 if (!quota_root)
2144 goto out;
2145
Jan Schmidt3d7b5a22013-04-25 16:04:52 +00002146 if (!fs_info->quota_enabled && fs_info->pending_quota_state)
2147 start_rescan_worker = 1;
2148
Arne Jansenbed92ea2012-06-28 18:03:02 +02002149 fs_info->quota_enabled = fs_info->pending_quota_state;
2150
2151 spin_lock(&fs_info->qgroup_lock);
2152 while (!list_empty(&fs_info->dirty_qgroups)) {
2153 struct btrfs_qgroup *qgroup;
2154 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2155 struct btrfs_qgroup, dirty);
2156 list_del_init(&qgroup->dirty);
2157 spin_unlock(&fs_info->qgroup_lock);
2158 ret = update_qgroup_info_item(trans, quota_root, qgroup);
2159 if (ret)
2160 fs_info->qgroup_flags |=
2161 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
Dongsheng Yangd3001ed2014-11-20 21:04:56 -05002162 ret = update_qgroup_limit_item(trans, quota_root, qgroup);
2163 if (ret)
2164 fs_info->qgroup_flags |=
2165 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002166 spin_lock(&fs_info->qgroup_lock);
2167 }
2168 if (fs_info->quota_enabled)
2169 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2170 else
2171 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2172 spin_unlock(&fs_info->qgroup_lock);
2173
2174 ret = update_qgroup_status_item(trans, fs_info, quota_root);
2175 if (ret)
2176 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2177
Jan Schmidt3d7b5a22013-04-25 16:04:52 +00002178 if (!ret && start_rescan_worker) {
Jan Schmidtb382a322013-05-28 15:47:24 +00002179 ret = qgroup_rescan_init(fs_info, 0, 1);
2180 if (!ret) {
2181 qgroup_rescan_zero_tracking(fs_info);
Qu Wenruofc97fab2014-02-28 10:46:16 +08002182 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2183 &fs_info->qgroup_rescan_work);
Jan Schmidtb382a322013-05-28 15:47:24 +00002184 }
Jan Schmidt3d7b5a22013-04-25 16:04:52 +00002185 ret = 0;
2186 }
2187
Arne Jansenbed92ea2012-06-28 18:03:02 +02002188out:
2189
2190 return ret;
2191}
2192
2193/*
2194 * copy the accounting information between qgroups. This is necessary when a
2195 * snapshot or a subvolume is created
2196 */
2197int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2198 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
2199 struct btrfs_qgroup_inherit *inherit)
2200{
2201 int ret = 0;
2202 int i;
2203 u64 *i_qgroups;
2204 struct btrfs_root *quota_root = fs_info->quota_root;
2205 struct btrfs_qgroup *srcgroup;
2206 struct btrfs_qgroup *dstgroup;
2207 u32 level_size = 0;
Wang Shilong3f5e2d32013-04-07 10:50:19 +00002208 u64 nums;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002209
Wang Shilongf2f6ed32013-04-07 10:50:16 +00002210 mutex_lock(&fs_info->qgroup_ioctl_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002211 if (!fs_info->quota_enabled)
Wang Shilongf2f6ed32013-04-07 10:50:16 +00002212 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002213
Wang Shilongf2f6ed32013-04-07 10:50:16 +00002214 if (!quota_root) {
2215 ret = -EINVAL;
2216 goto out;
2217 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02002218
Wang Shilong3f5e2d32013-04-07 10:50:19 +00002219 if (inherit) {
2220 i_qgroups = (u64 *)(inherit + 1);
2221 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2222 2 * inherit->num_excl_copies;
2223 for (i = 0; i < nums; ++i) {
2224 srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2225 if (!srcgroup) {
2226 ret = -EINVAL;
2227 goto out;
2228 }
2229 ++i_qgroups;
2230 }
2231 }
2232
Arne Jansenbed92ea2012-06-28 18:03:02 +02002233 /*
2234 * create a tracking group for the subvol itself
2235 */
2236 ret = add_qgroup_item(trans, quota_root, objectid);
2237 if (ret)
2238 goto out;
2239
Arne Jansenbed92ea2012-06-28 18:03:02 +02002240 if (srcid) {
2241 struct btrfs_root *srcroot;
2242 struct btrfs_key srckey;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002243
2244 srckey.objectid = srcid;
2245 srckey.type = BTRFS_ROOT_ITEM_KEY;
2246 srckey.offset = (u64)-1;
2247 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
2248 if (IS_ERR(srcroot)) {
2249 ret = PTR_ERR(srcroot);
2250 goto out;
2251 }
2252
2253 rcu_read_lock();
David Sterba707e8a02014-06-04 19:22:26 +02002254 level_size = srcroot->nodesize;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002255 rcu_read_unlock();
2256 }
2257
2258 /*
2259 * add qgroup to all inherited groups
2260 */
2261 if (inherit) {
2262 i_qgroups = (u64 *)(inherit + 1);
2263 for (i = 0; i < inherit->num_qgroups; ++i) {
2264 ret = add_qgroup_relation_item(trans, quota_root,
2265 objectid, *i_qgroups);
2266 if (ret)
2267 goto out;
2268 ret = add_qgroup_relation_item(trans, quota_root,
2269 *i_qgroups, objectid);
2270 if (ret)
2271 goto out;
2272 ++i_qgroups;
2273 }
2274 }
2275
2276
2277 spin_lock(&fs_info->qgroup_lock);
2278
2279 dstgroup = add_qgroup_rb(fs_info, objectid);
Dan Carpenter57a5a882012-07-30 02:15:43 -06002280 if (IS_ERR(dstgroup)) {
2281 ret = PTR_ERR(dstgroup);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002282 goto unlock;
Dan Carpenter57a5a882012-07-30 02:15:43 -06002283 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02002284
Dongsheng Yange8c85412014-11-20 20:58:34 -05002285 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
Dongsheng Yange8c85412014-11-20 20:58:34 -05002286 dstgroup->lim_flags = inherit->lim.flags;
2287 dstgroup->max_rfer = inherit->lim.max_rfer;
2288 dstgroup->max_excl = inherit->lim.max_excl;
2289 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2290 dstgroup->rsv_excl = inherit->lim.rsv_excl;
Dongsheng Yang1510e712014-11-20 21:01:41 -05002291
2292 ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
2293 if (ret) {
2294 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2295 btrfs_info(fs_info, "unable to update quota limit for %llu",
2296 dstgroup->qgroupid);
2297 goto unlock;
2298 }
Dongsheng Yange8c85412014-11-20 20:58:34 -05002299 }
2300
Arne Jansenbed92ea2012-06-28 18:03:02 +02002301 if (srcid) {
2302 srcgroup = find_qgroup_rb(fs_info, srcid);
Chris Masonf3a87f12012-09-14 20:06:30 -04002303 if (!srcgroup)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002304 goto unlock;
Josef Bacikfcebe452014-05-13 17:30:47 -07002305
2306 /*
2307 * We call inherit after we clone the root in order to make sure
2308 * our counts don't go crazy, so at this point the only
2309 * difference between the two roots should be the root node.
2310 */
2311 dstgroup->rfer = srcgroup->rfer;
2312 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2313 dstgroup->excl = level_size;
2314 dstgroup->excl_cmpr = level_size;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002315 srcgroup->excl = level_size;
2316 srcgroup->excl_cmpr = level_size;
Dongsheng Yang3eeb4d52014-11-20 20:14:38 -05002317
2318 /* inherit the limit info */
2319 dstgroup->lim_flags = srcgroup->lim_flags;
2320 dstgroup->max_rfer = srcgroup->max_rfer;
2321 dstgroup->max_excl = srcgroup->max_excl;
2322 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2323 dstgroup->rsv_excl = srcgroup->rsv_excl;
2324
Arne Jansenbed92ea2012-06-28 18:03:02 +02002325 qgroup_dirty(fs_info, dstgroup);
2326 qgroup_dirty(fs_info, srcgroup);
2327 }
2328
Chris Masonf3a87f12012-09-14 20:06:30 -04002329 if (!inherit)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002330 goto unlock;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002331
2332 i_qgroups = (u64 *)(inherit + 1);
2333 for (i = 0; i < inherit->num_qgroups; ++i) {
2334 ret = add_relation_rb(quota_root->fs_info, objectid,
2335 *i_qgroups);
2336 if (ret)
2337 goto unlock;
2338 ++i_qgroups;
2339 }
2340
2341 for (i = 0; i < inherit->num_ref_copies; ++i) {
2342 struct btrfs_qgroup *src;
2343 struct btrfs_qgroup *dst;
2344
2345 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2346 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2347
2348 if (!src || !dst) {
2349 ret = -EINVAL;
2350 goto unlock;
2351 }
2352
2353 dst->rfer = src->rfer - level_size;
2354 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2355 i_qgroups += 2;
2356 }
2357 for (i = 0; i < inherit->num_excl_copies; ++i) {
2358 struct btrfs_qgroup *src;
2359 struct btrfs_qgroup *dst;
2360
2361 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2362 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2363
2364 if (!src || !dst) {
2365 ret = -EINVAL;
2366 goto unlock;
2367 }
2368
2369 dst->excl = src->excl + level_size;
2370 dst->excl_cmpr = src->excl_cmpr + level_size;
2371 i_qgroups += 2;
2372 }
2373
2374unlock:
2375 spin_unlock(&fs_info->qgroup_lock);
2376out:
Wang Shilongf2f6ed32013-04-07 10:50:16 +00002377 mutex_unlock(&fs_info->qgroup_ioctl_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002378 return ret;
2379}
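
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the ioctl payload parsed above is a struct btrfs_qgroup_inherit followed
 * directly by a u64 array: num_qgroups qgroup ids, then num_ref_copies id
 * pairs, then num_excl_copies id pairs. Its total size works out to:
 */
static size_t qgroup_inherit_size_sketch(struct btrfs_qgroup_inherit *inherit)
{
	u64 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		   2 * inherit->num_excl_copies;

	return sizeof(*inherit) + nums * sizeof(u64);
}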
2380
2381/*
2382 * reserve some space for a qgroup and all its parents. The reservation takes
2383 * place with start_transaction or dealloc_reserve, similar to ENOSPC
2384 * accounting. If not enough space is available, EDQUOT is returned.
2385 * We assume that the requested space is new for all qgroups.
2386 */
2387int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
2388{
2389 struct btrfs_root *quota_root;
2390 struct btrfs_qgroup *qgroup;
2391 struct btrfs_fs_info *fs_info = root->fs_info;
2392 u64 ref_root = root->root_key.objectid;
2393 int ret = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002394 struct ulist_node *unode;
2395 struct ulist_iterator uiter;
2396
2397 if (!is_fstree(ref_root))
2398 return 0;
2399
2400 if (num_bytes == 0)
2401 return 0;
2402
2403 spin_lock(&fs_info->qgroup_lock);
2404 quota_root = fs_info->quota_root;
2405 if (!quota_root)
2406 goto out;
2407
2408 qgroup = find_qgroup_rb(fs_info, ref_root);
2409 if (!qgroup)
2410 goto out;
2411
2412 /*
2413	 * in a first step, we check all affected qgroups to see whether any
2414	 * limits would be exceeded
2415 */
Wang Shilong1e8f9152013-05-06 11:03:27 +00002416 ulist_reinit(fs_info->qgroup_ulist);
2417 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
Wang Shilong3c971852013-04-17 14:00:36 +00002418 (uintptr_t)qgroup, GFP_ATOMIC);
2419 if (ret < 0)
2420 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002421 ULIST_ITER_INIT(&uiter);
Wang Shilong1e8f9152013-05-06 11:03:27 +00002422 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02002423 struct btrfs_qgroup *qg;
2424 struct btrfs_qgroup_list *glist;
2425
Josef Bacikfcebe452014-05-13 17:30:47 -07002426 qg = u64_to_ptr(unode->aux);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002427
2428 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
Wang Shilongb4fcd6b2013-04-15 12:56:49 +00002429 qg->reserved + (s64)qg->rfer + num_bytes >
Wang Shilong720f1e22013-03-06 11:51:47 +00002430 qg->max_rfer) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02002431 ret = -EDQUOT;
Wang Shilong720f1e22013-03-06 11:51:47 +00002432 goto out;
2433 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02002434
2435 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
Wang Shilongb4fcd6b2013-04-15 12:56:49 +00002436 qg->reserved + (s64)qg->excl + num_bytes >
Wang Shilong720f1e22013-03-06 11:51:47 +00002437 qg->max_excl) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02002438 ret = -EDQUOT;
Wang Shilong720f1e22013-03-06 11:51:47 +00002439 goto out;
2440 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02002441
2442 list_for_each_entry(glist, &qg->groups, next_group) {
Wang Shilong1e8f9152013-05-06 11:03:27 +00002443 ret = ulist_add(fs_info->qgroup_ulist,
2444 glist->group->qgroupid,
Wang Shilong3c971852013-04-17 14:00:36 +00002445 (uintptr_t)glist->group, GFP_ATOMIC);
2446 if (ret < 0)
2447 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002448 }
2449 }
Wang Shilong3c971852013-04-17 14:00:36 +00002450 ret = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002451 /*
2452 * no limits exceeded, now record the reservation into all qgroups
2453 */
2454 ULIST_ITER_INIT(&uiter);
Wang Shilong1e8f9152013-05-06 11:03:27 +00002455 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02002456 struct btrfs_qgroup *qg;
2457
Josef Bacikfcebe452014-05-13 17:30:47 -07002458 qg = u64_to_ptr(unode->aux);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002459
2460 qg->reserved += num_bytes;
2461 }
2462
2463out:
2464 spin_unlock(&fs_info->qgroup_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002465 return ret;
2466}
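
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * both limit checks above ask the same question, namely whether the space
 * already reserved plus the accounted usage plus this request would exceed
 * the configured cap:
 */
static inline bool qgroup_over_limit_sketch(u64 reserved, u64 usage,
					    u64 num_bytes, u64 max)
{
	return reserved + usage + num_bytes > max;
}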
2467
2468void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
2469{
2470 struct btrfs_root *quota_root;
2471 struct btrfs_qgroup *qgroup;
2472 struct btrfs_fs_info *fs_info = root->fs_info;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002473 struct ulist_node *unode;
2474 struct ulist_iterator uiter;
2475 u64 ref_root = root->root_key.objectid;
Wang Shilong3c971852013-04-17 14:00:36 +00002476 int ret = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002477
2478 if (!is_fstree(ref_root))
2479 return;
2480
2481 if (num_bytes == 0)
2482 return;
2483
2484 spin_lock(&fs_info->qgroup_lock);
2485
2486 quota_root = fs_info->quota_root;
2487 if (!quota_root)
2488 goto out;
2489
2490 qgroup = find_qgroup_rb(fs_info, ref_root);
2491 if (!qgroup)
2492 goto out;
2493
Wang Shilong1e8f9152013-05-06 11:03:27 +00002494 ulist_reinit(fs_info->qgroup_ulist);
2495 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
Wang Shilong3c971852013-04-17 14:00:36 +00002496 (uintptr_t)qgroup, GFP_ATOMIC);
2497 if (ret < 0)
2498 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002499 ULIST_ITER_INIT(&uiter);
Wang Shilong1e8f9152013-05-06 11:03:27 +00002500 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02002501 struct btrfs_qgroup *qg;
2502 struct btrfs_qgroup_list *glist;
2503
Josef Bacikfcebe452014-05-13 17:30:47 -07002504 qg = u64_to_ptr(unode->aux);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002505
2506 qg->reserved -= num_bytes;
2507
2508 list_for_each_entry(glist, &qg->groups, next_group) {
Wang Shilong1e8f9152013-05-06 11:03:27 +00002509 ret = ulist_add(fs_info->qgroup_ulist,
2510 glist->group->qgroupid,
Wang Shilong3c971852013-04-17 14:00:36 +00002511 (uintptr_t)glist->group, GFP_ATOMIC);
2512 if (ret < 0)
2513 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002514 }
2515 }
2516
2517out:
2518 spin_unlock(&fs_info->qgroup_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002519}
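
/*
 * Usage sketch (hypothetical caller, not part of the original code):
 * btrfs_qgroup_reserve() and btrfs_qgroup_free() are meant to pair up around
 * an allocation attempt, releasing the reservation on failure:
 *
 *	ret = btrfs_qgroup_reserve(root, num_bytes);
 *	if (ret)
 *		return ret;	(-EDQUOT if a limit would be exceeded)
 *	ret = do_the_allocation();	(stand-in for the real work)
 *	if (ret)
 *		btrfs_qgroup_free(root, num_bytes);
 */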
2520
2521void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2522{
2523 if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
2524 return;
Frank Holtonefe120a2013-12-20 11:37:06 -05002525 btrfs_err(trans->root->fs_info,
2526 "qgroups not uptodate in trans handle %p: list is%s empty, "
2527 "seq is %#x.%x",
Arne Jansenbed92ea2012-06-28 18:03:02 +02002528 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
Jan Schmidtfc36ed7e2013-04-24 16:57:33 +00002529 (u32)(trans->delayed_ref_elem.seq >> 32),
2530 (u32)trans->delayed_ref_elem.seq);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002531 BUG();
2532}
Jan Schmidt2f232032013-04-25 16:04:51 +00002533
2534/*
2535 * returns < 0 on error, 0 when more leaves are to be scanned.
2536 * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
2537 */
2538static int
Jan Schmidtb382a322013-05-28 15:47:24 +00002539qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
Josef Bacikfcebe452014-05-13 17:30:47 -07002540 struct btrfs_trans_handle *trans, struct ulist *qgroups,
2541 struct ulist *tmp, struct extent_buffer *scratch_leaf)
Jan Schmidt2f232032013-04-25 16:04:51 +00002542{
2543 struct btrfs_key found;
Jan Schmidt2f232032013-04-25 16:04:51 +00002544 struct ulist *roots = NULL;
David Sterba3284da72015-02-25 15:47:32 +01002545 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
Josef Bacikfcebe452014-05-13 17:30:47 -07002546 u64 num_bytes;
Jan Schmidt2f232032013-04-25 16:04:51 +00002547 u64 seq;
Josef Bacikfcebe452014-05-13 17:30:47 -07002548 int new_roots;
Jan Schmidt2f232032013-04-25 16:04:51 +00002549 int slot;
2550 int ret;
2551
2552 path->leave_spinning = 1;
2553 mutex_lock(&fs_info->qgroup_rescan_lock);
2554 ret = btrfs_search_slot_for_read(fs_info->extent_root,
2555 &fs_info->qgroup_rescan_progress,
2556 path, 1, 0);
2557
2558 pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02002559 fs_info->qgroup_rescan_progress.objectid,
Jan Schmidt2f232032013-04-25 16:04:51 +00002560 fs_info->qgroup_rescan_progress.type,
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02002561 fs_info->qgroup_rescan_progress.offset, ret);
Jan Schmidt2f232032013-04-25 16:04:51 +00002562
2563 if (ret) {
2564 /*
2565		 * The rescan is about to end; we will not be scanning any
2566 * further blocks. We cannot unset the RESCAN flag here, because
2567 * we want to commit the transaction if everything went well.
2568 * To make the live accounting work in this phase, we set our
2569 * scan progress pointer such that every real extent objectid
2570 * will be smaller.
2571 */
2572 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2573 btrfs_release_path(path);
2574 mutex_unlock(&fs_info->qgroup_rescan_lock);
2575 return ret;
2576 }
2577
2578 btrfs_item_key_to_cpu(path->nodes[0], &found,
2579 btrfs_header_nritems(path->nodes[0]) - 1);
2580 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2581
2582 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2583 memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
2584 slot = path->slots[0];
2585 btrfs_release_path(path);
2586 mutex_unlock(&fs_info->qgroup_rescan_lock);
2587
2588 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2589 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
Josef Bacik3a6d75e2014-01-23 16:45:10 -05002590 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2591 found.type != BTRFS_METADATA_ITEM_KEY)
Jan Schmidt2f232032013-04-25 16:04:51 +00002592 continue;
Josef Bacik3a6d75e2014-01-23 16:45:10 -05002593 if (found.type == BTRFS_METADATA_ITEM_KEY)
David Sterba707e8a02014-06-04 19:22:26 +02002594 num_bytes = fs_info->extent_root->nodesize;
Josef Bacik3a6d75e2014-01-23 16:45:10 -05002595 else
2596 num_bytes = found.offset;
2597
Josef Bacikfcebe452014-05-13 17:30:47 -07002598 ulist_reinit(qgroups);
2599 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2600 &roots);
Jan Schmidt2f232032013-04-25 16:04:51 +00002601 if (ret < 0)
2602 goto out;
2603 spin_lock(&fs_info->qgroup_lock);
2604 seq = fs_info->qgroup_seq;
2605 fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
2606
Josef Bacikfcebe452014-05-13 17:30:47 -07002607 new_roots = 0;
2608 ret = qgroup_calc_old_refcnt(fs_info, 0, tmp, roots, qgroups,
2609 seq, &new_roots, 1);
2610 if (ret < 0) {
Jan Schmidt2f232032013-04-25 16:04:51 +00002611 spin_unlock(&fs_info->qgroup_lock);
2612 ulist_free(roots);
2613 goto out;
2614 }
2615
Josef Bacikfcebe452014-05-13 17:30:47 -07002616 ret = qgroup_adjust_counters(fs_info, 0, num_bytes, qgroups,
2617 seq, 0, new_roots, 1);
2618 if (ret < 0) {
2619 spin_unlock(&fs_info->qgroup_lock);
2620 ulist_free(roots);
2621 goto out;
Jan Schmidt2f232032013-04-25 16:04:51 +00002622 }
Jan Schmidt2f232032013-04-25 16:04:51 +00002623 spin_unlock(&fs_info->qgroup_lock);
2624 ulist_free(roots);
Jan Schmidt2f232032013-04-25 16:04:51 +00002625 }
Jan Schmidt2f232032013-04-25 16:04:51 +00002626out:
2627 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2628
2629 return ret;
2630}
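
/*
 * Illustrative sketch (hypothetical helper, not part of the original code):
 * the size logic above differs by key type because a METADATA_ITEM's offset
 * encodes the level of the tree block rather than a byte count, so the
 * nodesize is used instead:
 */
static u64 qgroup_rescan_item_bytes_sketch(struct btrfs_fs_info *fs_info,
					   struct btrfs_key *found)
{
	if (found->type == BTRFS_METADATA_ITEM_KEY)
		return fs_info->extent_root->nodesize;
	return found->offset;
}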
2631
Qu Wenruod458b052014-02-28 10:46:19 +08002632static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
Jan Schmidt2f232032013-04-25 16:04:51 +00002633{
Jan Schmidtb382a322013-05-28 15:47:24 +00002634 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
2635 qgroup_rescan_work);
Jan Schmidt2f232032013-04-25 16:04:51 +00002636 struct btrfs_path *path;
2637 struct btrfs_trans_handle *trans = NULL;
Josef Bacikfcebe452014-05-13 17:30:47 -07002638 struct ulist *tmp = NULL, *qgroups = NULL;
Jan Schmidt2f232032013-04-25 16:04:51 +00002639 struct extent_buffer *scratch_leaf = NULL;
2640 int err = -ENOMEM;
2641
2642 path = btrfs_alloc_path();
2643 if (!path)
2644 goto out;
Josef Bacikfcebe452014-05-13 17:30:47 -07002645 qgroups = ulist_alloc(GFP_NOFS);
2646 if (!qgroups)
2647 goto out;
Jan Schmidt2f232032013-04-25 16:04:51 +00002648 tmp = ulist_alloc(GFP_NOFS);
2649 if (!tmp)
2650 goto out;
2651 scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
2652 if (!scratch_leaf)
2653 goto out;
2654
2655 err = 0;
2656 while (!err) {
2657 trans = btrfs_start_transaction(fs_info->fs_root, 0);
2658 if (IS_ERR(trans)) {
2659 err = PTR_ERR(trans);
2660 break;
2661 }
2662 if (!fs_info->quota_enabled) {
2663 err = -EINTR;
2664 } else {
Jan Schmidtb382a322013-05-28 15:47:24 +00002665 err = qgroup_rescan_leaf(fs_info, path, trans,
Josef Bacikfcebe452014-05-13 17:30:47 -07002666 qgroups, tmp, scratch_leaf);
Jan Schmidt2f232032013-04-25 16:04:51 +00002667 }
2668 if (err > 0)
2669 btrfs_commit_transaction(trans, fs_info->fs_root);
2670 else
2671 btrfs_end_transaction(trans, fs_info->fs_root);
2672 }
2673
2674out:
2675 kfree(scratch_leaf);
Josef Bacikfcebe452014-05-13 17:30:47 -07002676 ulist_free(qgroups);
Josef Bacik2a108402014-05-20 09:23:31 -04002677 ulist_free(tmp);
Jan Schmidt2f232032013-04-25 16:04:51 +00002678 btrfs_free_path(path);
Jan Schmidt2f232032013-04-25 16:04:51 +00002679
2680 mutex_lock(&fs_info->qgroup_rescan_lock);
2681 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2682
2683 if (err == 2 &&
2684 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2685 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2686 } else if (err < 0) {
2687 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2688 }
2689 mutex_unlock(&fs_info->qgroup_rescan_lock);
2690
2691 if (err >= 0) {
Frank Holtonefe120a2013-12-20 11:37:06 -05002692 btrfs_info(fs_info, "qgroup scan completed%s",
Jan Schmidt2f232032013-04-25 16:04:51 +00002693 err == 2 ? " (inconsistency flag cleared)" : "");
2694 } else {
Frank Holtonefe120a2013-12-20 11:37:06 -05002695 btrfs_err(fs_info, "qgroup scan failed with %d", err);
Jan Schmidt2f232032013-04-25 16:04:51 +00002696 }
Jan Schmidt57254b6e2013-05-06 19:14:17 +00002697
2698 complete_all(&fs_info->qgroup_rescan_completion);
Jan Schmidt2f232032013-04-25 16:04:51 +00002699}
2700
Jan Schmidtb382a322013-05-28 15:47:24 +00002701/*
2702 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2703 * memory required for the rescan context.
2704 */
2705static int
2706qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2707 int init_flags)
Jan Schmidt2f232032013-04-25 16:04:51 +00002708{
2709 int ret = 0;
Jan Schmidt2f232032013-04-25 16:04:51 +00002710
Jan Schmidtb382a322013-05-28 15:47:24 +00002711 if (!init_flags &&
2712 (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
2713 !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
2714 ret = -EINVAL;
2715 goto err;
2716 }
Jan Schmidt2f232032013-04-25 16:04:51 +00002717
2718 mutex_lock(&fs_info->qgroup_rescan_lock);
2719 spin_lock(&fs_info->qgroup_lock);
Jan Schmidtb382a322013-05-28 15:47:24 +00002720
2721 if (init_flags) {
2722 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2723 ret = -EINPROGRESS;
2724 else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
2725 ret = -EINVAL;
2726
2727 if (ret) {
2728 spin_unlock(&fs_info->qgroup_lock);
2729 mutex_unlock(&fs_info->qgroup_rescan_lock);
2730 goto err;
2731 }
2732
2733 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2734 }
2735
2736 memset(&fs_info->qgroup_rescan_progress, 0,
2737 sizeof(fs_info->qgroup_rescan_progress));
2738 fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2739
2740 spin_unlock(&fs_info->qgroup_lock);
2741 mutex_unlock(&fs_info->qgroup_rescan_lock);
2742
2743 init_completion(&fs_info->qgroup_rescan_completion);
2744
2745 memset(&fs_info->qgroup_rescan_work, 0,
2746 sizeof(fs_info->qgroup_rescan_work));
Qu Wenruofc97fab2014-02-28 10:46:16 +08002747 btrfs_init_work(&fs_info->qgroup_rescan_work,
Liu Bo9e0af232014-08-15 23:36:53 +08002748 btrfs_qgroup_rescan_helper,
Qu Wenruofc97fab2014-02-28 10:46:16 +08002749 btrfs_qgroup_rescan_worker, NULL, NULL);
Jan Schmidtb382a322013-05-28 15:47:24 +00002750
Jan Schmidt2f232032013-04-25 16:04:51 +00002751 if (ret) {
Jan Schmidtb382a322013-05-28 15:47:24 +00002752err:
Frank Holtonefe120a2013-12-20 11:37:06 -05002753 btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
Jan Schmidt2f232032013-04-25 16:04:51 +00002754 return ret;
2755 }
2756
Jan Schmidtb382a322013-05-28 15:47:24 +00002757 return 0;
2758}
Jan Schmidt2f232032013-04-25 16:04:51 +00002759
Jan Schmidtb382a322013-05-28 15:47:24 +00002760static void
2761qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2762{
2763 struct rb_node *n;
2764 struct btrfs_qgroup *qgroup;
2765
2766 spin_lock(&fs_info->qgroup_lock);
Jan Schmidt2f232032013-04-25 16:04:51 +00002767 /* clear all current qgroup tracking information */
2768 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2769 qgroup = rb_entry(n, struct btrfs_qgroup, node);
2770 qgroup->rfer = 0;
2771 qgroup->rfer_cmpr = 0;
2772 qgroup->excl = 0;
2773 qgroup->excl_cmpr = 0;
2774 }
2775 spin_unlock(&fs_info->qgroup_lock);
Jan Schmidtb382a322013-05-28 15:47:24 +00002776}
Jan Schmidt2f232032013-04-25 16:04:51 +00002777
Jan Schmidtb382a322013-05-28 15:47:24 +00002778int
2779btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2780{
2781 int ret = 0;
2782 struct btrfs_trans_handle *trans;
2783
2784 ret = qgroup_rescan_init(fs_info, 0, 1);
2785 if (ret)
2786 return ret;
2787
2788 /*
2789 * We have set the rescan_progress to 0, which means no more
2790	 * delayed refs will be accounted by btrfs_qgroup_account.
2791	 * However, btrfs_qgroup_account may be right after its call
2792 * to btrfs_find_all_roots, in which case it would still do the
2793 * accounting.
2794 * To solve this, we're committing the transaction, which will
2795 * ensure we run all delayed refs and only after that, we are
2796 * going to clear all tracking information for a clean start.
2797 */
2798
2799 trans = btrfs_join_transaction(fs_info->fs_root);
2800 if (IS_ERR(trans)) {
2801 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2802 return PTR_ERR(trans);
2803 }
2804 ret = btrfs_commit_transaction(trans, fs_info->fs_root);
2805 if (ret) {
2806 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2807 return ret;
2808 }
2809
2810 qgroup_rescan_zero_tracking(fs_info);
2811
Qu Wenruofc97fab2014-02-28 10:46:16 +08002812 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2813 &fs_info->qgroup_rescan_work);
Jan Schmidt2f232032013-04-25 16:04:51 +00002814
2815 return 0;
2816}
Jan Schmidt57254b6e2013-05-06 19:14:17 +00002817
2818int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
2819{
2820 int running;
2821 int ret = 0;
2822
2823 mutex_lock(&fs_info->qgroup_rescan_lock);
2824 spin_lock(&fs_info->qgroup_lock);
2825 running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2826 spin_unlock(&fs_info->qgroup_lock);
2827 mutex_unlock(&fs_info->qgroup_rescan_lock);
2828
2829 if (running)
2830 ret = wait_for_completion_interruptible(
2831 &fs_info->qgroup_rescan_completion);
2832
2833 return ret;
2834}
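
/*
 * Usage sketch (hypothetical caller, not part of the original code): a
 * synchronous "rescan and wait" sequence combines the two entry points
 * above:
 *
 *	ret = btrfs_qgroup_rescan(fs_info);
 *	if (!ret)
 *		ret = btrfs_qgroup_wait_for_completion(fs_info);
 */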
Jan Schmidtb382a322013-05-28 15:47:24 +00002835
2836/*
2837 * this is only called from open_ctree where we're still single-threaded, thus
2838 * locking is omitted here.
2839 */
2840void
2841btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2842{
2843 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
Qu Wenruofc97fab2014-02-28 10:46:16 +08002844 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2845 &fs_info->qgroup_rescan_work);
Jan Schmidtb382a322013-05-28 15:47:24 +00002846}