blob: 01eeffe9ae537fc8b9210c0065c23c9ffa5166be [file] [log] [blame]
Arne Jansenbed92ea2012-06-28 18:03:02 +02001/*
2 * Copyright (C) 2011 STRATO. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include <linux/pagemap.h>
21#include <linux/writeback.h>
22#include <linux/blkdev.h>
23#include <linux/rbtree.h>
24#include <linux/slab.h>
25#include <linux/workqueue.h>
Filipe Brandenburger55e301f2013-01-29 06:04:50 +000026#include <linux/btrfs.h>
Arne Jansenbed92ea2012-06-28 18:03:02 +020027
28#include "ctree.h"
29#include "transaction.h"
30#include "disk-io.h"
31#include "locking.h"
32#include "ulist.h"
Arne Jansenbed92ea2012-06-28 18:03:02 +020033#include "backref.h"
Jan Schmidt2f232032013-04-25 16:04:51 +000034#include "extent_io.h"
Arne Jansenbed92ea2012-06-28 18:03:02 +020035
36/* TODO XXX FIXME
37 * - subvol delete -> delete when ref goes to 0? delete limits also?
38 * - reorganize keys
39 * - compressed
40 * - sync
Arne Jansenbed92ea2012-06-28 18:03:02 +020041 * - copy also limits on subvol creation
42 * - limit
 43 * - caches for ulists
44 * - performance benchmarks
45 * - check all ioctl parameters
46 */
47
/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 * The qgroupid doubles as the key offset of the on-disk INFO/LIMIT items.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state (mirrors the on-disk btrfs_qgroup_info_item)
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits (mirrors the on-disk btrfs_qgroup_limit_item)
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups; see qgroup_dirty() */
	struct rb_node node;	  /* anchor in fs_info->qgroup_tree */

	/*
	 * temp variables for accounting operations
	 */
	u64 tag;
	u64 refcnt;
};
90
/*
 * glue structure to represent the relations between qgroups.  One instance
 * exists per member<->parent relation: it is linked into the member's
 * ->groups list via next_group and into the parent's ->members list via
 * next_member (see add_relation_rb()).
 */
struct btrfs_qgroup_list {
	struct list_head next_group;	/* entry in member->groups */
	struct list_head next_member;	/* entry in group->members */
	struct btrfs_qgroup *group;	/* the parent side of the relation */
	struct btrfs_qgroup *member;	/* the child side of the relation */
};
100
Jan Schmidtb382a322013-05-28 15:47:24 +0000101static int
102qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
103 int init_flags);
104static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
Jan Schmidt2f232032013-04-25 16:04:51 +0000105
Wang Shilong58400fc2013-04-07 10:50:17 +0000106/* must be called with qgroup_ioctl_lock held */
Arne Jansenbed92ea2012-06-28 18:03:02 +0200107static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
108 u64 qgroupid)
109{
110 struct rb_node *n = fs_info->qgroup_tree.rb_node;
111 struct btrfs_qgroup *qgroup;
112
113 while (n) {
114 qgroup = rb_entry(n, struct btrfs_qgroup, node);
115 if (qgroup->qgroupid < qgroupid)
116 n = n->rb_left;
117 else if (qgroup->qgroupid > qgroupid)
118 n = n->rb_right;
119 else
120 return qgroup;
121 }
122 return NULL;
123}
124
125/* must be called with qgroup_lock held */
126static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
127 u64 qgroupid)
128{
129 struct rb_node **p = &fs_info->qgroup_tree.rb_node;
130 struct rb_node *parent = NULL;
131 struct btrfs_qgroup *qgroup;
132
133 while (*p) {
134 parent = *p;
135 qgroup = rb_entry(parent, struct btrfs_qgroup, node);
136
137 if (qgroup->qgroupid < qgroupid)
138 p = &(*p)->rb_left;
139 else if (qgroup->qgroupid > qgroupid)
140 p = &(*p)->rb_right;
141 else
142 return qgroup;
143 }
144
145 qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
146 if (!qgroup)
147 return ERR_PTR(-ENOMEM);
148
149 qgroup->qgroupid = qgroupid;
150 INIT_LIST_HEAD(&qgroup->groups);
151 INIT_LIST_HEAD(&qgroup->members);
152 INIT_LIST_HEAD(&qgroup->dirty);
153
154 rb_link_node(&qgroup->node, parent, p);
155 rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
156
157 return qgroup;
158}
159
Wang Shilong4082bd32013-08-14 09:13:36 +0800160static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
Arne Jansenbed92ea2012-06-28 18:03:02 +0200161{
Arne Jansenbed92ea2012-06-28 18:03:02 +0200162 struct btrfs_qgroup_list *list;
163
Arne Jansenbed92ea2012-06-28 18:03:02 +0200164 list_del(&qgroup->dirty);
Arne Jansenbed92ea2012-06-28 18:03:02 +0200165 while (!list_empty(&qgroup->groups)) {
166 list = list_first_entry(&qgroup->groups,
167 struct btrfs_qgroup_list, next_group);
168 list_del(&list->next_group);
169 list_del(&list->next_member);
170 kfree(list);
171 }
172
173 while (!list_empty(&qgroup->members)) {
174 list = list_first_entry(&qgroup->members,
175 struct btrfs_qgroup_list, next_member);
176 list_del(&list->next_group);
177 list_del(&list->next_member);
178 kfree(list);
179 }
180 kfree(qgroup);
Wang Shilong4082bd32013-08-14 09:13:36 +0800181}
Arne Jansenbed92ea2012-06-28 18:03:02 +0200182
Wang Shilong4082bd32013-08-14 09:13:36 +0800183/* must be called with qgroup_lock held */
184static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
185{
186 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
187
188 if (!qgroup)
189 return -ENOENT;
190
191 rb_erase(&qgroup->node, &fs_info->qgroup_tree);
192 __del_qgroup_rb(qgroup);
Arne Jansenbed92ea2012-06-28 18:03:02 +0200193 return 0;
194}
195
196/* must be called with qgroup_lock held */
197static int add_relation_rb(struct btrfs_fs_info *fs_info,
198 u64 memberid, u64 parentid)
199{
200 struct btrfs_qgroup *member;
201 struct btrfs_qgroup *parent;
202 struct btrfs_qgroup_list *list;
203
204 member = find_qgroup_rb(fs_info, memberid);
205 parent = find_qgroup_rb(fs_info, parentid);
206 if (!member || !parent)
207 return -ENOENT;
208
209 list = kzalloc(sizeof(*list), GFP_ATOMIC);
210 if (!list)
211 return -ENOMEM;
212
213 list->group = parent;
214 list->member = member;
215 list_add_tail(&list->next_group, &member->groups);
216 list_add_tail(&list->next_member, &parent->members);
217
218 return 0;
219}
220
221/* must be called with qgroup_lock held */
222static int del_relation_rb(struct btrfs_fs_info *fs_info,
223 u64 memberid, u64 parentid)
224{
225 struct btrfs_qgroup *member;
226 struct btrfs_qgroup *parent;
227 struct btrfs_qgroup_list *list;
228
229 member = find_qgroup_rb(fs_info, memberid);
230 parent = find_qgroup_rb(fs_info, parentid);
231 if (!member || !parent)
232 return -ENOENT;
233
234 list_for_each_entry(list, &member->groups, next_group) {
235 if (list->group == parent) {
236 list_del(&list->next_group);
237 list_del(&list->next_member);
238 kfree(list);
239 return 0;
240 }
241 }
242 return -ENOENT;
243}
244
245/*
246 * The full config is read in one go, only called from open_ctree()
247 * It doesn't use any locking, as at this point we're still single-threaded
248 */
249int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
250{
251 struct btrfs_key key;
252 struct btrfs_key found_key;
253 struct btrfs_root *quota_root = fs_info->quota_root;
254 struct btrfs_path *path = NULL;
255 struct extent_buffer *l;
256 int slot;
257 int ret = 0;
258 u64 flags = 0;
Jan Schmidtb382a322013-05-28 15:47:24 +0000259 u64 rescan_progress = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +0200260
261 if (!fs_info->quota_enabled)
262 return 0;
263
Wang Shilong1e8f9152013-05-06 11:03:27 +0000264 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
265 if (!fs_info->qgroup_ulist) {
266 ret = -ENOMEM;
267 goto out;
268 }
269
Arne Jansenbed92ea2012-06-28 18:03:02 +0200270 path = btrfs_alloc_path();
271 if (!path) {
272 ret = -ENOMEM;
273 goto out;
274 }
275
276 /* default this to quota off, in case no status key is found */
277 fs_info->qgroup_flags = 0;
278
279 /*
280 * pass 1: read status, all qgroup infos and limits
281 */
282 key.objectid = 0;
283 key.type = 0;
284 key.offset = 0;
285 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
286 if (ret)
287 goto out;
288
289 while (1) {
290 struct btrfs_qgroup *qgroup;
291
292 slot = path->slots[0];
293 l = path->nodes[0];
294 btrfs_item_key_to_cpu(l, &found_key, slot);
295
296 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
297 struct btrfs_qgroup_status_item *ptr;
298
299 ptr = btrfs_item_ptr(l, slot,
300 struct btrfs_qgroup_status_item);
301
302 if (btrfs_qgroup_status_version(l, ptr) !=
303 BTRFS_QGROUP_STATUS_VERSION) {
304 printk(KERN_ERR
305 "btrfs: old qgroup version, quota disabled\n");
306 goto out;
307 }
308 if (btrfs_qgroup_status_generation(l, ptr) !=
309 fs_info->generation) {
310 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
311 printk(KERN_ERR
312 "btrfs: qgroup generation mismatch, "
313 "marked as inconsistent\n");
314 }
315 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
316 ptr);
Jan Schmidtb382a322013-05-28 15:47:24 +0000317 rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
Arne Jansenbed92ea2012-06-28 18:03:02 +0200318 goto next1;
319 }
320
321 if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
322 found_key.type != BTRFS_QGROUP_LIMIT_KEY)
323 goto next1;
324
325 qgroup = find_qgroup_rb(fs_info, found_key.offset);
326 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
327 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
328 printk(KERN_ERR "btrfs: inconsitent qgroup config\n");
329 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
330 }
331 if (!qgroup) {
332 qgroup = add_qgroup_rb(fs_info, found_key.offset);
333 if (IS_ERR(qgroup)) {
334 ret = PTR_ERR(qgroup);
335 goto out;
336 }
337 }
338 switch (found_key.type) {
339 case BTRFS_QGROUP_INFO_KEY: {
340 struct btrfs_qgroup_info_item *ptr;
341
342 ptr = btrfs_item_ptr(l, slot,
343 struct btrfs_qgroup_info_item);
344 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
345 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
346 qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
347 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
348 /* generation currently unused */
349 break;
350 }
351 case BTRFS_QGROUP_LIMIT_KEY: {
352 struct btrfs_qgroup_limit_item *ptr;
353
354 ptr = btrfs_item_ptr(l, slot,
355 struct btrfs_qgroup_limit_item);
356 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
357 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
358 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
359 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
360 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
361 break;
362 }
363 }
364next1:
365 ret = btrfs_next_item(quota_root, path);
366 if (ret < 0)
367 goto out;
368 if (ret)
369 break;
370 }
371 btrfs_release_path(path);
372
373 /*
374 * pass 2: read all qgroup relations
375 */
376 key.objectid = 0;
377 key.type = BTRFS_QGROUP_RELATION_KEY;
378 key.offset = 0;
379 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
380 if (ret)
381 goto out;
382 while (1) {
383 slot = path->slots[0];
384 l = path->nodes[0];
385 btrfs_item_key_to_cpu(l, &found_key, slot);
386
387 if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
388 goto next2;
389
390 if (found_key.objectid > found_key.offset) {
391 /* parent <- member, not needed to build config */
392 /* FIXME should we omit the key completely? */
393 goto next2;
394 }
395
396 ret = add_relation_rb(fs_info, found_key.objectid,
397 found_key.offset);
Arne Jansenff248582013-01-17 01:22:08 -0700398 if (ret == -ENOENT) {
399 printk(KERN_WARNING
400 "btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
401 (unsigned long long)found_key.objectid,
402 (unsigned long long)found_key.offset);
403 ret = 0; /* ignore the error */
404 }
Arne Jansenbed92ea2012-06-28 18:03:02 +0200405 if (ret)
406 goto out;
407next2:
408 ret = btrfs_next_item(quota_root, path);
409 if (ret < 0)
410 goto out;
411 if (ret)
412 break;
413 }
414out:
415 fs_info->qgroup_flags |= flags;
416 if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
417 fs_info->quota_enabled = 0;
418 fs_info->pending_quota_state = 0;
Jan Schmidtb382a322013-05-28 15:47:24 +0000419 } else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
420 ret >= 0) {
421 ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
Arne Jansenbed92ea2012-06-28 18:03:02 +0200422 }
423 btrfs_free_path(path);
424
Jan Schmidteb1716a2013-05-28 15:47:23 +0000425 if (ret < 0) {
Wang Shilong1e8f9152013-05-06 11:03:27 +0000426 ulist_free(fs_info->qgroup_ulist);
Jan Schmidteb1716a2013-05-28 15:47:23 +0000427 fs_info->qgroup_ulist = NULL;
Jan Schmidtb382a322013-05-28 15:47:24 +0000428 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
Jan Schmidteb1716a2013-05-28 15:47:23 +0000429 }
Wang Shilong1e8f9152013-05-06 11:03:27 +0000430
Arne Jansenbed92ea2012-06-28 18:03:02 +0200431 return ret < 0 ? ret : 0;
432}
433
/*
 * Tear down all in-memory qgroup state: every qgroup in the rbtree, every
 * relation glue object, and the qgroup ulist.
 *
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable().
 * The first two are single-threaded paths.  For the third, quota_root has
 * already been set to NULL with qgroup_lock held, so it is safe to clean
 * up the in-memory structures without holding qgroup_lock here.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * this runs both on umount and on quota disable, so clear the
	 * pointer after freeing to avoid a double free on the second call
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}
458
459static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
460 struct btrfs_root *quota_root,
461 u64 src, u64 dst)
462{
463 int ret;
464 struct btrfs_path *path;
465 struct btrfs_key key;
466
467 path = btrfs_alloc_path();
468 if (!path)
469 return -ENOMEM;
470
471 key.objectid = src;
472 key.type = BTRFS_QGROUP_RELATION_KEY;
473 key.offset = dst;
474
475 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
476
477 btrfs_mark_buffer_dirty(path->nodes[0]);
478
479 btrfs_free_path(path);
480 return ret;
481}
482
483static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
484 struct btrfs_root *quota_root,
485 u64 src, u64 dst)
486{
487 int ret;
488 struct btrfs_path *path;
489 struct btrfs_key key;
490
491 path = btrfs_alloc_path();
492 if (!path)
493 return -ENOMEM;
494
495 key.objectid = src;
496 key.type = BTRFS_QGROUP_RELATION_KEY;
497 key.offset = dst;
498
499 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
500 if (ret < 0)
501 goto out;
502
503 if (ret > 0) {
504 ret = -ENOENT;
505 goto out;
506 }
507
508 ret = btrfs_del_item(trans, quota_root, path);
509out:
510 btrfs_free_path(path);
511 return ret;
512}
513
/*
 * Create the two on-disk items that describe a new qgroup: an INFO item
 * with all counters zeroed and a LIMIT item with no limits set.  Both
 * items share objectid 0 and use the qgroupid as their key offset.
 */
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/* info item: all usage counters start at zero */
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	/* limit item: same key offset, no limits enabled yet */
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
572
573static int del_qgroup_item(struct btrfs_trans_handle *trans,
574 struct btrfs_root *quota_root, u64 qgroupid)
575{
576 int ret;
577 struct btrfs_path *path;
578 struct btrfs_key key;
579
580 path = btrfs_alloc_path();
581 if (!path)
582 return -ENOMEM;
583
584 key.objectid = 0;
585 key.type = BTRFS_QGROUP_INFO_KEY;
586 key.offset = qgroupid;
587 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
588 if (ret < 0)
589 goto out;
590
591 if (ret > 0) {
592 ret = -ENOENT;
593 goto out;
594 }
595
596 ret = btrfs_del_item(trans, quota_root, path);
597 if (ret)
598 goto out;
599
600 btrfs_release_path(path);
601
602 key.type = BTRFS_QGROUP_LIMIT_KEY;
603 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
604 if (ret < 0)
605 goto out;
606
607 if (ret > 0) {
608 ret = -ENOENT;
609 goto out;
610 }
611
612 ret = btrfs_del_item(trans, quota_root, path);
613
614out:
615 btrfs_free_path(path);
616 return ret;
617}
618
619static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
620 struct btrfs_root *root, u64 qgroupid,
621 u64 flags, u64 max_rfer, u64 max_excl,
622 u64 rsv_rfer, u64 rsv_excl)
623{
624 struct btrfs_path *path;
625 struct btrfs_key key;
626 struct extent_buffer *l;
627 struct btrfs_qgroup_limit_item *qgroup_limit;
628 int ret;
629 int slot;
630
631 key.objectid = 0;
632 key.type = BTRFS_QGROUP_LIMIT_KEY;
633 key.offset = qgroupid;
634
635 path = btrfs_alloc_path();
Wang Shilong84cbe2f2013-02-27 11:20:56 +0000636 if (!path)
637 return -ENOMEM;
638
Arne Jansenbed92ea2012-06-28 18:03:02 +0200639 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
640 if (ret > 0)
641 ret = -ENOENT;
642
643 if (ret)
644 goto out;
645
646 l = path->nodes[0];
647 slot = path->slots[0];
648 qgroup_limit = btrfs_item_ptr(l, path->slots[0],
649 struct btrfs_qgroup_limit_item);
650 btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
651 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
652 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
653 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
654 btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);
655
656 btrfs_mark_buffer_dirty(l);
657
658out:
659 btrfs_free_path(path);
660 return ret;
661}
662
663static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
664 struct btrfs_root *root,
665 struct btrfs_qgroup *qgroup)
666{
667 struct btrfs_path *path;
668 struct btrfs_key key;
669 struct extent_buffer *l;
670 struct btrfs_qgroup_info_item *qgroup_info;
671 int ret;
672 int slot;
673
674 key.objectid = 0;
675 key.type = BTRFS_QGROUP_INFO_KEY;
676 key.offset = qgroup->qgroupid;
677
678 path = btrfs_alloc_path();
Wang Shilong84cbe2f2013-02-27 11:20:56 +0000679 if (!path)
680 return -ENOMEM;
681
Arne Jansenbed92ea2012-06-28 18:03:02 +0200682 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
683 if (ret > 0)
684 ret = -ENOENT;
685
686 if (ret)
687 goto out;
688
689 l = path->nodes[0];
690 slot = path->slots[0];
691 qgroup_info = btrfs_item_ptr(l, path->slots[0],
692 struct btrfs_qgroup_info_item);
693 btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
694 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
695 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
696 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
697 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
698
699 btrfs_mark_buffer_dirty(l);
700
701out:
702 btrfs_free_path(path);
703 return ret;
704}
705
706static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
707 struct btrfs_fs_info *fs_info,
708 struct btrfs_root *root)
709{
710 struct btrfs_path *path;
711 struct btrfs_key key;
712 struct extent_buffer *l;
713 struct btrfs_qgroup_status_item *ptr;
714 int ret;
715 int slot;
716
717 key.objectid = 0;
718 key.type = BTRFS_QGROUP_STATUS_KEY;
719 key.offset = 0;
720
721 path = btrfs_alloc_path();
Wang Shilong84cbe2f2013-02-27 11:20:56 +0000722 if (!path)
723 return -ENOMEM;
724
Arne Jansenbed92ea2012-06-28 18:03:02 +0200725 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
726 if (ret > 0)
727 ret = -ENOENT;
728
729 if (ret)
730 goto out;
731
732 l = path->nodes[0];
733 slot = path->slots[0];
734 ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
735 btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
736 btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
Jan Schmidt2f232032013-04-25 16:04:51 +0000737 btrfs_set_qgroup_status_rescan(l, ptr,
738 fs_info->qgroup_rescan_progress.objectid);
Arne Jansenbed92ea2012-06-28 18:03:02 +0200739
740 btrfs_mark_buffer_dirty(l);
741
742out:
743 btrfs_free_path(path);
744 return ret;
745}
746
/*
 * Empty the whole quota tree by repeatedly deleting all items of the
 * first leaf until the tree contains no items, then clear the pending
 * quota state.
 *
 * NOTE(review): the original header said "called with qgroup_lock held",
 * but the caller btrfs_quota_disable() invokes this after dropping the
 * qgroup spinlock (it only holds qgroup_ioctl_lock) — confirm which lock
 * is actually required before relying on this comment.
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	/* smallest possible key: always lands in the leftmost leaf */
	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}
795
/*
 * Enable quota accounting: create the quota tree with its status item,
 * add one qgroup item per existing subvolume (found via the ROOT_REF
 * items in the tree root) plus one for the fs tree itself, and publish
 * the new quota root in fs_info.
 *
 * Serialized against other quota ioctls by qgroup_ioctl_lock.  If quota
 * is already enabled, only pending_quota_state is set.  On any failure
 * the partially created tree and the qgroup ulist are torn down again.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		/* already enabled; just arm the pending state */
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret =  PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	/* the status item records generation, version, flags and rescan */
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
				 struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	/* start out inconsistent: existing data is not accounted yet */
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	/* scan the tree root for ROOT_REF items == existing subvolumes */
	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;	/* no subvolumes at all */
	if (ret < 0)
		goto out_free_path;


	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			/* one qgroup per subvolume, id == subvol objectid */
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	/* the fs tree itself has no ROOT_REF item; add it explicitly */
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	/* publish the new root under qgroup_lock */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
926
/*
 * Disable quota accounting: unpublish the quota root under qgroup_lock,
 * free all in-memory qgroup state, empty the quota tree on disk and
 * finally delete the tree itself.
 *
 * Serialized against other quota ioctls by qgroup_ioctl_lock.  Returns 0
 * on success (or when quota was not enabled) or a negative errno.
 */
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	/* detach the quota root under the spinlock so readers see NULL */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	spin_unlock(&fs_info->qgroup_lock);

	/* safe without qgroup_lock now that quota_root is unpublished */
	btrfs_free_qgroup_config(fs_info);

	/*
	 * NOTE(review): quota_root cannot be NULL here — fs_info->quota_root
	 * was checked non-NULL above while holding qgroup_ioctl_lock — so
	 * this check looks unreachable; kept for safety.
	 */
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
973
Jan Schmidt2f232032013-04-25 16:04:51 +0000974static void qgroup_dirty(struct btrfs_fs_info *fs_info,
975 struct btrfs_qgroup *qgroup)
Arne Jansenbed92ea2012-06-28 18:03:02 +0200976{
Jan Schmidt2f232032013-04-25 16:04:51 +0000977 if (list_empty(&qgroup->dirty))
978 list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
Arne Jansenbed92ea2012-06-28 18:03:02 +0200979}
980
/*
 * Create a member(src) -> parent(dst) qgroup relation: insert both
 * mirrored on-disk relation items and the in-memory glue object.
 *
 * Serialized by qgroup_ioctl_lock.  Returns -EINVAL when quota is not
 * enabled or either qgroup does not exist, -EEXIST when the relation is
 * already present, or a negative errno from the tree operations.
 */
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	/* two mirrored items represent one relation on disk */
	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		/* roll back the first item so disk state stays paired */
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
1028
/*
 * Remove the parent/child relation between qgroups @dst (parent) and
 * @src (child), deleting both mirrored relation items on disk and the
 * in-memory links.
 *
 * Returns 0 on success, -EINVAL if quotas are disabled or either qgroup
 * is missing, -ENOENT if no such relation exists.  If only one of the
 * two on-disk items could be deleted, the first error is returned.
 */
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;
	int err;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such qgroup relation exist firstly */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	/* delete both directions; keep the first failure in ret */
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	/* drop the in-memory links regardless of item-deletion errors */
	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
1073
1074int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
1075 struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
1076{
1077 struct btrfs_root *quota_root;
1078 struct btrfs_qgroup *qgroup;
1079 int ret = 0;
1080
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001081 mutex_lock(&fs_info->qgroup_ioctl_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001082 quota_root = fs_info->quota_root;
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001083 if (!quota_root) {
1084 ret = -EINVAL;
1085 goto out;
1086 }
Wang Shilong534e6622013-04-17 14:49:51 +00001087 qgroup = find_qgroup_rb(fs_info, qgroupid);
1088 if (qgroup) {
1089 ret = -EEXIST;
1090 goto out;
1091 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02001092
1093 ret = add_qgroup_item(trans, quota_root, qgroupid);
Wang Shilong534e6622013-04-17 14:49:51 +00001094 if (ret)
1095 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001096
1097 spin_lock(&fs_info->qgroup_lock);
1098 qgroup = add_qgroup_rb(fs_info, qgroupid);
1099 spin_unlock(&fs_info->qgroup_lock);
1100
1101 if (IS_ERR(qgroup))
1102 ret = PTR_ERR(qgroup);
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001103out:
1104 mutex_unlock(&fs_info->qgroup_ioctl_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001105 return ret;
1106}
1107
1108int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
1109 struct btrfs_fs_info *fs_info, u64 qgroupid)
1110{
1111 struct btrfs_root *quota_root;
Arne Jansen2cf68702013-01-17 01:22:09 -07001112 struct btrfs_qgroup *qgroup;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001113 int ret = 0;
1114
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001115 mutex_lock(&fs_info->qgroup_ioctl_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001116 quota_root = fs_info->quota_root;
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001117 if (!quota_root) {
1118 ret = -EINVAL;
1119 goto out;
1120 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02001121
Arne Jansen2cf68702013-01-17 01:22:09 -07001122 qgroup = find_qgroup_rb(fs_info, qgroupid);
Wang Shilong534e6622013-04-17 14:49:51 +00001123 if (!qgroup) {
1124 ret = -ENOENT;
1125 goto out;
1126 } else {
1127 /* check if there are no relations to this qgroup */
1128 if (!list_empty(&qgroup->groups) ||
1129 !list_empty(&qgroup->members)) {
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001130 ret = -EBUSY;
1131 goto out;
Arne Jansen2cf68702013-01-17 01:22:09 -07001132 }
1133 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02001134 ret = del_qgroup_item(trans, quota_root, qgroupid);
1135
1136 spin_lock(&fs_info->qgroup_lock);
1137 del_qgroup_rb(quota_root->fs_info, qgroupid);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001138 spin_unlock(&fs_info->qgroup_lock);
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001139out:
1140 mutex_unlock(&fs_info->qgroup_ioctl_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001141 return ret;
1142}
1143
/*
 * Apply the limits in @limit to the qgroup with id @qgroupid.
 *
 * The limit item is updated on disk first; if that fails, the
 * INCONSISTENT status flag is raised but the in-memory limits are
 * still applied, so enforcement follows the requested values until a
 * rescan reconciles the trees.
 *
 * Returns 0 on success, -EINVAL if quotas are disabled, -ENOENT if the
 * qgroup does not exist, or the item-update error.
 */
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}
	ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
				       limit->flags, limit->max_rfer,
				       limit->max_excl, limit->rsv_rfer,
				       limit->rsv_excl);
	if (ret) {
		/* disk and memory now disagree; flag for rescan */
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		printk(KERN_INFO "unable to update quota limit for %llu\n",
		       (unsigned long long)qgroupid);
	}

	/* in-memory limits are updated even on item failure (see above) */
	spin_lock(&fs_info->qgroup_lock);
	qgroup->lim_flags = limit->flags;
	qgroup->max_rfer = limit->max_rfer;
	qgroup->max_excl = limit->max_excl;
	qgroup->rsv_rfer = limit->rsv_rfer;
	qgroup->rsv_excl = limit->rsv_excl;
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
1185
Arne Jansenbed92ea2012-06-28 18:03:02 +02001186/*
1187 * btrfs_qgroup_record_ref is called when the ref is added or deleted. it puts
1188 * the modification into a list that's later used by btrfs_end_transaction to
1189 * pass the recorded modifications on to btrfs_qgroup_account_ref.
1190 */
1191int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
1192 struct btrfs_delayed_ref_node *node,
1193 struct btrfs_delayed_extent_op *extent_op)
1194{
1195 struct qgroup_update *u;
1196
1197 BUG_ON(!trans->delayed_ref_elem.seq);
1198 u = kmalloc(sizeof(*u), GFP_NOFS);
1199 if (!u)
1200 return -ENOMEM;
1201
1202 u->node = node;
1203 u->extent_op = extent_op;
1204 list_add_tail(&u->list, &trans->qgroup_ref_list);
1205
1206 return 0;
1207}
1208
/*
 * Accounting step 1: walk up from every old root of the extent and bump
 * each reachable qgroup's refcnt once per root.  @seq is the base value
 * for this accounting round; a refcnt below @seq means "not yet visited
 * in this round", so the first visit sets it to seq + 1 and later visits
 * increment it.  @tmp is caller-provided scratch space.
 *
 * Caller must hold fs_info->qgroup_lock (ulist_add uses GFP_ATOMIC).
 * Returns 0 on success or a negative errno from ulist_add.
 */
static int qgroup_account_ref_step1(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		/* BFS over this root's qgroup and all its ancestors */
		ulist_reinit(tmp);
		/* XXX id not needed */
		ret = ulist_add(tmp, qg->qgroupid,
				(u64)(uintptr_t)qg, GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
			/* first visit this round sets seq + 1, later +1 each */
			if (qg->refcnt < seq)
				qg->refcnt = seq + 1;
			else
				++qg->refcnt;

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(u64)(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
1254
/*
 * Accounting step 2: walk up from @qgroup (the ref's new/changed root)
 * and adjust the referenced counters of every qgroup not already visited
 * in step 1 (refcnt < @seq) by sgn * num_bytes.  If the extent had no
 * other roots, the exclusive counters change by the same amount.  Every
 * visited qgroup is tagged with @seq so step 3 can skip it.
 *
 * Caller must hold fs_info->qgroup_lock.  Returns 0 or a negative errno
 * from ulist_add.
 */
static int qgroup_account_ref_step2(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq, int sgn, u64 num_bytes,
				    struct btrfs_qgroup *qgroup)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct btrfs_qgroup_list *glist;
	int ret;

	ulist_reinit(tmp);
	ret = ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
		if (qg->refcnt < seq) {
			/* not visited by step 1 */
			qg->rfer += sgn * num_bytes;
			qg->rfer_cmpr += sgn * num_bytes;
			/* no other roots: the space is exclusive here */
			if (roots->nnodes == 0) {
				qg->excl += sgn * num_bytes;
				qg->excl_cmpr += sgn * num_bytes;
			}
			qgroup_dirty(fs_info, qg);
		}
		/* tag so step 3 skips everything reachable from the new root */
		WARN_ON(qg->tag >= seq);
		qg->tag = seq;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
1297
/*
 * Accounting step 3: walk up from the old roots once more and fix up
 * exclusive counters.  A qgroup whose refcnt was bumped once per old
 * root (refcnt - seq == roots->nnodes) is reachable from all of them,
 * so the extent was exclusive there and must move out of (or into) the
 * excl counters.  Qgroups tagged with @seq in step 2 are skipped, since
 * they were already handled from the new root.
 *
 * Caller must hold fs_info->qgroup_lock.  Returns 0 or a negative errno
 * from ulist_add.
 */
static int qgroup_account_ref_step3(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq, int sgn, u64 num_bytes)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
		if (ret < 0)
			return ret;

		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
			/* already accounted from the new root in step 2 */
			if (qg->tag == seq)
				continue;

			/* reachable from every old root -> was exclusive */
			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl -= sgn * num_bytes;
				qg->excl_cmpr -= sgn * num_bytes;
				qgroup_dirty(fs_info, qg);
			}

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
1346
/*
 * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
 * from the fs. First, all roots referencing the extent are searched, and
 * then the space is accounted accordingly to the different roots. The
 * accounting algorithm works in 3 steps documented inline.
 */
int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_delayed_ref_node *node,
			     struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key ins;
	struct btrfs_root *quota_root;
	u64 ref_root;
	struct btrfs_qgroup *qgroup;
	struct ulist *roots = NULL;
	u64 seq;
	int ret = 0;
	int sgn;

	if (!fs_info->quota_enabled)
		return 0;

	BUG_ON(!fs_info->quota_root);

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	/* extract the owning root id from the delayed ref variant */
	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		struct btrfs_delayed_tree_ref *ref;
		ref = btrfs_delayed_node_to_tree_ref(node);
		ref_root = ref->root;
	} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   node->type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_delayed_data_ref *ref;
		ref = btrfs_delayed_node_to_data_ref(node);
		ref_root = ref->root;
	} else {
		BUG();
	}

	if (!is_fstree(ref_root)) {
		/*
		 * non-fs-trees are not being accounted
		 */
		return 0;
	}

	/* pick the sign of the adjustment and the tree mod seq to query */
	switch (node->action) {
	case BTRFS_ADD_DELAYED_REF:
	case BTRFS_ADD_DELAYED_EXTENT:
		sgn = 1;
		seq = btrfs_tree_mod_seq_prev(node->seq);
		break;
	case BTRFS_DROP_DELAYED_REF:
		sgn = -1;
		seq = node->seq;
		break;
	case BTRFS_UPDATE_DELAYED_HEAD:
		return 0;
	default:
		BUG();
	}

	/*
	 * while a rescan is running, extents at or beyond the rescan
	 * progress pointer will be picked up by the rescan itself, so
	 * they must not be accounted here as well.
	 */
	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return 0;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * the delayed ref sequence number we pass depends on the direction of
	 * the operation. for add operations, we pass
	 * tree_mod_log_prev_seq(node->seq) to skip
	 * the delayed ref's current sequence number, because we need the state
	 * of the tree before the add operation. for delete operations, we pass
	 * (node->seq) to include the delayed ref's current sequence number,
	 * because we need the state of the tree after the delete operation.
	 */
	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, seq, &roots);
	if (ret < 0)
		return ret;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto unlock;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto unlock;

	/*
	 * step 1: for each old ref, visit all nodes once and inc refcnt
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	seq = fs_info->qgroup_seq;
	fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

	ret = qgroup_account_ref_step1(fs_info, roots, fs_info->qgroup_ulist,
				       seq);
	if (ret)
		goto unlock;

	/*
	 * step 2: walk from the new root
	 */
	ret = qgroup_account_ref_step2(fs_info, roots, fs_info->qgroup_ulist,
				       seq, sgn, node->num_bytes, qgroup);
	if (ret)
		goto unlock;

	/*
	 * step 3: walk again from old refs
	 */
	ret = qgroup_account_ref_step3(fs_info, roots, fs_info->qgroup_ulist,
				       seq, sgn, node->num_bytes);
	if (ret)
		goto unlock;

unlock:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(roots);

	return ret;
}
1479
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 *
 * Also commits the pending quota state, updates the on-disk status item
 * and, when quotas have just been enabled this transaction, kicks off
 * the initial rescan worker.  Item-update failures are not fatal: they
 * set the INCONSISTENT flag and rely on a later rescan.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;
	int start_rescan_worker = 0;

	if (!quota_root)
		goto out;

	/* quotas were just switched on: schedule the initial rescan below */
	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		/* drop the spinlock around the item update, which can block */
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	if (!ret && start_rescan_worker) {
		ret = qgroup_rescan_init(fs_info, 0, 1);
		if (!ret) {
			qgroup_rescan_zero_tracking(fs_info);
			btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
					   &fs_info->qgroup_rescan_work);
		}
		/* a failed rescan start must not fail the commit */
		ret = 0;
	}

out:

	return ret;
}
1535
/*
 * copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created
 *
 * @srcid:    id of the snapshotted subvolume, or 0 when creating a fresh
 *            subvolume (then no counters are copied)
 * @objectid: id of the new subvolume; a tracking qgroup with this id is
 *            created
 * @inherit:  optional ioctl payload listing qgroups to join and counter
 *            copies to perform; may be NULL
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
		goto out;

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	/* validate all qgroup ids referenced by @inherit up front */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
			if (!srcgroup) {
				ret = -EINVAL;
				goto out;
			}
			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		ret = update_qgroup_limit_item(trans, quota_root, objectid,
					       inherit->lim.flags,
					       inherit->lim.max_rfer,
					       inherit->lim.max_excl,
					       inherit->lim.rsv_rfer,
					       inherit->lim.rsv_excl);
		if (ret)
			goto out;
	}

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;
		int srcroot_level;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		/*
		 * the snapshot shares everything with the source except its
		 * new root node; level_size accounts for that one block
		 */
		rcu_read_lock();
		srcroot_level = btrfs_header_level(srcroot->node);
		level_size = btrfs_level_size(srcroot, srcroot_level);
		rcu_read_unlock();
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i) {
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret)
				goto out;
			++i_qgroups;
		}
	}


	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;
		/* snapshot inherits the source's rfer minus the new root */
		dstgroup->rfer = srcgroup->rfer - level_size;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
		/* source now only owns its root block exclusively */
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;
		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
				      *i_qgroups);
		if (ret)
			goto unlock;
		++i_qgroups;
	}

	/* copy referenced counters for the requested src/dst pairs */
	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
		i_qgroups += 2;
	}
	/* copy exclusive counters for the requested src/dst pairs */
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		i_qgroups += 2;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
1704
/*
 * reserve some space for a qgroup and all its parents. The reservation takes
 * place with start_transaction or dealloc_reserve, similar to ENOSPC
 * accounting. If not enough space is available, EDQUOT is returned.
 * We assume that the requested space is new for all qgroups.
 *
 * Two-phase under fs_info->qgroup_lock: first every affected qgroup
 * (the subvol's qgroup and all ancestors) is checked against its
 * limits; only if none would be exceeded is the reservation applied,
 * so a failure leaves all counters untouched.
 */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	/* only fs trees are accounted */
	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		    qg->max_rfer) {
			ret = -EDQUOT;
			goto out;
		}

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		    qg->max_excl) {
			ret = -EDQUOT;
			goto out;
		}

		/* extend the walk to all parent groups */
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
1791
/*
 * Release a previously reserved amount from the subvol's qgroup and all
 * of its ancestors (the inverse of btrfs_qgroup_reserve).  Errors while
 * walking the hierarchy silently abort the walk; the function returns
 * void, matching the caller's inability to recover.
 */
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	/* only fs trees are accounted */
	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/* walk the qgroup and all its ancestors, dropping the reservation */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
1844
1845void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
1846{
1847 if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
1848 return;
Jan Schmidtfc36ed7e2013-04-24 16:57:33 +00001849 pr_err("btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x\n",
Arne Jansenbed92ea2012-06-28 18:03:02 +02001850 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
Jan Schmidtfc36ed7e2013-04-24 16:57:33 +00001851 (u32)(trans->delayed_ref_elem.seq >> 32),
1852 (u32)trans->delayed_ref_elem.seq);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001853 BUG();
1854}
Jan Schmidt2f232032013-04-25 16:04:51 +00001855
/*
 * Scan one leaf of the extent tree and account every EXTENT_ITEM found
 * into the qgroup counters.
 *
 * returns < 0 on error, 0 when more leafs are to be scanned.
 * returns 1 when done.
 * NOTE(review): an earlier comment here claimed a return value of 2 when
 * FLAG_INCONSISTENT was cleared, but no path below ever produces 2; the
 * caller must treat any positive return as "done".
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *tmp,
		   struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct seq_list tree_mod_seq_elem = {};
	u64 seq;
	int slot;
	int ret;

	path->leave_spinning = 1;
	/*
	 * qgroup_rescan_lock protects qgroup_rescan_progress against
	 * concurrent updates from the live accounting path.
	 */
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 (unsigned long long)fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 (unsigned long long)fs_info->qgroup_rescan_progress.offset,
		 ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	/*
	 * Advance the progress pointer past the last item of this leaf so
	 * live accounting covers everything beyond what we scan here.
	 */
	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	/*
	 * Snapshot the leaf into the caller-supplied scratch buffer so we
	 * can release the path (and its tree locks) before the per-extent
	 * work below.
	 */
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY)
			continue;
		/* all roots referencing this extent, as of tree_mod_seq */
		ret = btrfs_find_all_roots(trans, fs_info, found.objectid,
					   tree_mod_seq_elem.seq, &roots);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}

		/*
		 * step2 of btrfs_qgroup_account_ref works from a single root,
		 * we're doing all at once here.
		 */
		ulist_reinit(tmp);
		ULIST_ITER_INIT(&uiter);
		while ((unode = ulist_next(roots, &uiter))) {
			struct btrfs_qgroup *qg;

			qg = find_qgroup_rb(fs_info, unode->val);
			if (!qg)
				continue;

			ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg,
					GFP_ATOMIC);
			if (ret < 0) {
				spin_unlock(&fs_info->qgroup_lock);
				ulist_free(roots);
				goto out;
			}
		}

		/* this loop is similar to step 2 of btrfs_qgroup_account_ref */
		ULIST_ITER_INIT(&uiter);
		while ((unode = ulist_next(tmp, &uiter))) {
			struct btrfs_qgroup *qg;
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t) unode->aux;
			qg->rfer += found.offset;
			qg->rfer_cmpr += found.offset;
			WARN_ON(qg->tag >= seq);
			/*
			 * refcnt was bumped once per referencing root in
			 * step1; equality with nnodes means this qgroup sees
			 * all references, i.e. the extent is exclusive.
			 */
			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl += found.offset;
				qg->excl_cmpr += found.offset;
			}
			qgroup_dirty(fs_info, qg);

			/* propagate to all parent qgroups */
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0) {
					spin_unlock(&fs_info->qgroup_lock);
					ulist_free(roots);
					goto out;
				}
			}
		}

		spin_unlock(&fs_info->qgroup_lock);
		ulist_free(roots);
		ret = 0;
	}

out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}
1990
1991static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
1992{
Jan Schmidtb382a322013-05-28 15:47:24 +00001993 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
1994 qgroup_rescan_work);
Jan Schmidt2f232032013-04-25 16:04:51 +00001995 struct btrfs_path *path;
1996 struct btrfs_trans_handle *trans = NULL;
Jan Schmidt2f232032013-04-25 16:04:51 +00001997 struct ulist *tmp = NULL;
1998 struct extent_buffer *scratch_leaf = NULL;
1999 int err = -ENOMEM;
2000
2001 path = btrfs_alloc_path();
2002 if (!path)
2003 goto out;
2004 tmp = ulist_alloc(GFP_NOFS);
2005 if (!tmp)
2006 goto out;
2007 scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
2008 if (!scratch_leaf)
2009 goto out;
2010
2011 err = 0;
2012 while (!err) {
2013 trans = btrfs_start_transaction(fs_info->fs_root, 0);
2014 if (IS_ERR(trans)) {
2015 err = PTR_ERR(trans);
2016 break;
2017 }
2018 if (!fs_info->quota_enabled) {
2019 err = -EINTR;
2020 } else {
Jan Schmidtb382a322013-05-28 15:47:24 +00002021 err = qgroup_rescan_leaf(fs_info, path, trans,
Jan Schmidt2f232032013-04-25 16:04:51 +00002022 tmp, scratch_leaf);
2023 }
2024 if (err > 0)
2025 btrfs_commit_transaction(trans, fs_info->fs_root);
2026 else
2027 btrfs_end_transaction(trans, fs_info->fs_root);
2028 }
2029
2030out:
2031 kfree(scratch_leaf);
2032 ulist_free(tmp);
2033 btrfs_free_path(path);
Jan Schmidt2f232032013-04-25 16:04:51 +00002034
2035 mutex_lock(&fs_info->qgroup_rescan_lock);
2036 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2037
2038 if (err == 2 &&
2039 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2040 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2041 } else if (err < 0) {
2042 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2043 }
2044 mutex_unlock(&fs_info->qgroup_rescan_lock);
2045
2046 if (err >= 0) {
2047 pr_info("btrfs: qgroup scan completed%s\n",
2048 err == 2 ? " (inconsistency flag cleared)" : "");
2049 } else {
2050 pr_err("btrfs: qgroup scan failed with %d\n", err);
2051 }
Jan Schmidt57254b6e2013-05-06 19:14:17 +00002052
2053 complete_all(&fs_info->qgroup_rescan_completion);
Jan Schmidt2f232032013-04-25 16:04:51 +00002054}
2055
Jan Schmidtb382a322013-05-28 15:47:24 +00002056/*
2057 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2058 * memory required for the rescan context.
2059 */
2060static int
2061qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2062 int init_flags)
Jan Schmidt2f232032013-04-25 16:04:51 +00002063{
2064 int ret = 0;
Jan Schmidt2f232032013-04-25 16:04:51 +00002065
Jan Schmidtb382a322013-05-28 15:47:24 +00002066 if (!init_flags &&
2067 (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
2068 !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
2069 ret = -EINVAL;
2070 goto err;
2071 }
Jan Schmidt2f232032013-04-25 16:04:51 +00002072
2073 mutex_lock(&fs_info->qgroup_rescan_lock);
2074 spin_lock(&fs_info->qgroup_lock);
Jan Schmidtb382a322013-05-28 15:47:24 +00002075
2076 if (init_flags) {
2077 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2078 ret = -EINPROGRESS;
2079 else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
2080 ret = -EINVAL;
2081
2082 if (ret) {
2083 spin_unlock(&fs_info->qgroup_lock);
2084 mutex_unlock(&fs_info->qgroup_rescan_lock);
2085 goto err;
2086 }
2087
2088 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2089 }
2090
2091 memset(&fs_info->qgroup_rescan_progress, 0,
2092 sizeof(fs_info->qgroup_rescan_progress));
2093 fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2094
2095 spin_unlock(&fs_info->qgroup_lock);
2096 mutex_unlock(&fs_info->qgroup_rescan_lock);
2097
2098 init_completion(&fs_info->qgroup_rescan_completion);
2099
2100 memset(&fs_info->qgroup_rescan_work, 0,
2101 sizeof(fs_info->qgroup_rescan_work));
2102 fs_info->qgroup_rescan_work.func = btrfs_qgroup_rescan_worker;
2103
Jan Schmidt2f232032013-04-25 16:04:51 +00002104 if (ret) {
Jan Schmidtb382a322013-05-28 15:47:24 +00002105err:
2106 pr_info("btrfs: qgroup_rescan_init failed with %d\n", ret);
Jan Schmidt2f232032013-04-25 16:04:51 +00002107 return ret;
2108 }
2109
Jan Schmidtb382a322013-05-28 15:47:24 +00002110 return 0;
2111}
Jan Schmidt2f232032013-04-25 16:04:51 +00002112
Jan Schmidtb382a322013-05-28 15:47:24 +00002113static void
2114qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2115{
2116 struct rb_node *n;
2117 struct btrfs_qgroup *qgroup;
2118
2119 spin_lock(&fs_info->qgroup_lock);
Jan Schmidt2f232032013-04-25 16:04:51 +00002120 /* clear all current qgroup tracking information */
2121 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2122 qgroup = rb_entry(n, struct btrfs_qgroup, node);
2123 qgroup->rfer = 0;
2124 qgroup->rfer_cmpr = 0;
2125 qgroup->excl = 0;
2126 qgroup->excl_cmpr = 0;
2127 }
2128 spin_unlock(&fs_info->qgroup_lock);
Jan Schmidtb382a322013-05-28 15:47:24 +00002129}
Jan Schmidt2f232032013-04-25 16:04:51 +00002130
Jan Schmidtb382a322013-05-28 15:47:24 +00002131int
2132btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2133{
2134 int ret = 0;
2135 struct btrfs_trans_handle *trans;
2136
2137 ret = qgroup_rescan_init(fs_info, 0, 1);
2138 if (ret)
2139 return ret;
2140
2141 /*
2142 * We have set the rescan_progress to 0, which means no more
2143 * delayed refs will be accounted by btrfs_qgroup_account_ref.
2144 * However, btrfs_qgroup_account_ref may be right after its call
2145 * to btrfs_find_all_roots, in which case it would still do the
2146 * accounting.
2147 * To solve this, we're committing the transaction, which will
2148 * ensure we run all delayed refs and only after that, we are
2149 * going to clear all tracking information for a clean start.
2150 */
2151
2152 trans = btrfs_join_transaction(fs_info->fs_root);
2153 if (IS_ERR(trans)) {
2154 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2155 return PTR_ERR(trans);
2156 }
2157 ret = btrfs_commit_transaction(trans, fs_info->fs_root);
2158 if (ret) {
2159 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2160 return ret;
2161 }
2162
2163 qgroup_rescan_zero_tracking(fs_info);
2164
2165 btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
2166 &fs_info->qgroup_rescan_work);
Jan Schmidt2f232032013-04-25 16:04:51 +00002167
2168 return 0;
2169}
Jan Schmidt57254b6e2013-05-06 19:14:17 +00002170
2171int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
2172{
2173 int running;
2174 int ret = 0;
2175
2176 mutex_lock(&fs_info->qgroup_rescan_lock);
2177 spin_lock(&fs_info->qgroup_lock);
2178 running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2179 spin_unlock(&fs_info->qgroup_lock);
2180 mutex_unlock(&fs_info->qgroup_rescan_lock);
2181
2182 if (running)
2183 ret = wait_for_completion_interruptible(
2184 &fs_info->qgroup_rescan_completion);
2185
2186 return ret;
2187}
Jan Schmidtb382a322013-05-28 15:47:24 +00002188
2189/*
2190 * this is only called from open_ctree where we're still single threaded, thus
2191 * locking is omitted here.
2192 */
2193void
2194btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2195{
2196 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2197 btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
2198 &fs_info->qgroup_rescan_work);
2199}