/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "transaction.h"
#include "extent_io.h"

/*
 * TODO:
 * - subvol delete -> delete when ref goes to 0? delete limits also?
 * - copy also limits on subvol creation
 * - caches for ulists
 * - performance benchmarks
 * - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	u64 lim_flags;	/* which limits are set */

	/*
	 * reservation tracking
	 */

	struct list_head groups;  /* groups this group is a member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 */

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
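/*
 * The two macros above stash a qgroup pointer in the u64 aux slot of a
 * ulist node. A minimal illustrative round trip (the local variable
 * names are hypothetical):
 *
 *	struct btrfs_qgroup grp = { .qgroupid = 5 };
 *	u64 aux = ptr_to_u64(&grp);
 *	struct btrfs_qgroup *back = u64_to_ptr(aux);
 *
 * 'back' points at 'grp' again; going through uintptr_t keeps the cast
 * well-defined on both 32-bit and 64-bit builds.
 */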
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}
/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	u64 rescan_progress = 0;

	if (!fs_info->quota_enabled)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					"old qgroup version, quota disabled");
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, "
					"marked as inconsistent");
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.type = BTRFS_QGROUP_RELATION_KEY;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
next2:
		ret = btrfs_next_item(quota_root, path);

	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
out:
	btrfs_free_path(path);
	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable();
 * the first two are in single-threaded paths. And for the third one, we have
 * set quota_root to be null with qgroup_lock held before, so it is safe to
 * clean up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * we call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();

	key.type = BTRFS_QGROUP_RELATION_KEY;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();

	key.type = BTRFS_QGROUP_RELATION_KEY;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, quota_root, path);

	btrfs_free_path(path);

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();

	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);

static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();

	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, quota_root, path);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, quota_root, path);

	btrfs_free_path(path);
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root, u64 qgroupid,
				    u64 flags, u64 max_rfer, u64 max_excl,
				    u64 rsv_rfer, u64 rsv_excl)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroupid;

	path = btrfs_alloc_path();

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);

	btrfs_mark_buffer_dirty(l);

	btrfs_free_path(path);

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;

	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

	btrfs_free_path(path);

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;

	key.type = BTRFS_QGROUP_STATUS_KEY;

	path = btrfs_alloc_path();

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

	btrfs_free_path(path);
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;

	path = btrfs_alloc_path();

	path->leave_spinning = 1;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);

		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		ret = btrfs_del_items(trans, root, path, 0, nr);

		btrfs_release_path(path);
	}

	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();

	key.type = BTRFS_QGROUP_STATUS_KEY;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.type = BTRFS_ROOT_REF_KEY;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
			}
		}
		ret = btrfs_next_item(tree_root, path);

	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_path(path);

	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);

		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check if such a qgroup relation exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check if such a qgroup relation exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}
	/* check if there are no relations to this qgroup */
	if (!list_empty(&qgroup->groups) ||
	    !list_empty(&qgroup->members)) {
		ret = -EBUSY;
		goto out;
	}

	ret = del_qgroup_item(trans, quota_root, qgroupid);

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}
	ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
				       limit->flags, limit->max_rfer,
				       limit->max_excl, limit->rsv_rfer,
				       limit->rsv_excl);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

	spin_lock(&fs_info->qgroup_lock);
	qgroup->lim_flags = limit->flags;
	qgroup->max_rfer = limit->max_rfer;
	qgroup->max_excl = limit->max_excl;
	qgroup->rsv_rfer = limit->rsv_rfer;
	qgroup->rsv_excl = limit->rsv_excl;
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
static int comp_oper(struct btrfs_qgroup_operation *oper1,
		     struct btrfs_qgroup_operation *oper2)
{
	if (oper1->bytenr < oper2->bytenr)
		return -1;
	if (oper1->bytenr > oper2->bytenr)
		return 1;
	if (oper1->seq < oper2->seq)
		return -1;
	if (oper1->seq > oper2->seq)
		return 1;
	if (oper1->ref_root < oper2->ref_root)
		return -1;
	if (oper1->ref_root > oper2->ref_root)
		return 1;
	if (oper1->type < oper2->type)
		return -1;
	if (oper1->type > oper2->type)
		return 1;
	return 0;
}

static int insert_qgroup_oper(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
{
	struct rb_node *parent = NULL;
	struct btrfs_qgroup_operation *cur;

	spin_lock(&fs_info->qgroup_op_lock);
	p = &fs_info->qgroup_op_tree.rb_node;
	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct btrfs_qgroup_operation, n);
		cmp = comp_oper(cur, oper);
		if (cmp < 0) {
			p = &(*p)->rb_right;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			spin_unlock(&fs_info->qgroup_op_lock);
			return -EEXIST;
		}
	}
	rb_link_node(&oper->n, parent, p);
	rb_insert_color(&oper->n, &fs_info->qgroup_op_tree);
	spin_unlock(&fs_info->qgroup_op_lock);
	return 0;
}
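
/*
 * comp_oper() above defines a strict total order over (bytenr, seq,
 * ref_root, type), which is what lets the rb-tree walk treat an equal
 * compare as "operation already queued".  A minimal self-contained
 * sketch of the same duplicate-rejecting ordered insert, using a plain
 * binary search tree instead of the kernel rb-tree (node type and names
 * are hypothetical, illustrative only):
 */
struct demo_op {
	u64 key;			/* stand-in for the 4-way compare */
	struct demo_op *left, *right;
};

static int demo_op_insert(struct demo_op **link, struct demo_op *op)
{
	while (*link) {
		if (op->key < (*link)->key)
			link = &(*link)->left;
		else if (op->key > (*link)->key)
			link = &(*link)->right;
		else
			return -EEXIST;	/* duplicate operation */
	}
	*link = op;			/* attach at the found leaf slot */
	return 0;
}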
/*
 * Record a quota operation for processing later on.
 * @trans: the transaction we are adding the delayed op to.
 * @fs_info: the fs_info for this fs.
 * @ref_root: the root of the reference we are acting on.
 * @bytenr: the bytenr we are acting on.
 * @num_bytes: the number of bytes in the reference.
 * @type: the type of operation this is.
 * @mod_seq: do we need to get a sequence number for looking up roots.
 *
 * We just add it to our trans qgroup_ref_list and carry on and process these
 * operations in order at some later point. If the reference root isn't a fs
 * root then we don't bother with doing anything.
 *
 * MUST BE HOLDING THE REF LOCK.
 */
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info, u64 ref_root,
			    u64 bytenr, u64 num_bytes,
			    enum btrfs_qgroup_operation_type type, int mod_seq)
	struct btrfs_qgroup_operation *oper;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		return 0;

	oper = kmalloc(sizeof(*oper), GFP_NOFS);
	if (!oper)
		return -ENOMEM;

	oper->ref_root = ref_root;
	oper->bytenr = bytenr;
	oper->num_bytes = num_bytes;
	oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
	INIT_LIST_HEAD(&oper->elem.list);

	ret = insert_qgroup_oper(fs_info, oper);
	if (ret) {
		/* Shouldn't happen so have an assert for developers */
		ASSERT(0);
		kfree(oper);
		return ret;
	}
	list_add_tail(&oper->list, &trans->qgroup_ref_list);

	if (mod_seq)
		btrfs_get_tree_mod_seq(fs_info, &oper->elem);
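
/*
 * A hedged usage sketch: a caller that knows it holds the only reference
 * to a freshly allocated tree block might queue the exclusive-add fast
 * path roughly like so, with trans, fs_info, ref_root, bytenr and
 * num_bytes assumed from its own context:
 *
 *	ret = btrfs_qgroup_record_ref(trans, fs_info, ref_root,
 *				      bytenr, num_bytes,
 *				      BTRFS_QGROUP_OPER_ADD_EXCL, 0);
 *	if (ret)
 *		return ret;
 *
 * The op is only queued here; the actual accounting happens later in
 * btrfs_delayed_qgroup_accounting() when the delayed refs are run.
 */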
/*
 * The easy accounting, if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 */
static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_qgroup_operation *oper)
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
		goto out;
	qgroup = find_qgroup_rb(fs_info, oper->ref_root);
	if (!qgroup)
		goto out;
	switch (oper->type) {
	case BTRFS_QGROUP_OPER_ADD_EXCL:
		sign = 1;
		break;
	case BTRFS_QGROUP_OPER_SUB_EXCL:
		sign = -1;
		break;
	}
	qgroup->rfer += sign * oper->num_bytes;
	qgroup->rfer_cmpr += sign * oper->num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
	qgroup->excl += sign * oper->num_bytes;
	qgroup->excl_cmpr += sign * oper->num_bytes;

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = u64_to_ptr(unode->aux);
		qgroup->rfer += sign * oper->num_bytes;
		qgroup->rfer_cmpr += sign * oper->num_bytes;
		qgroup->excl += sign * oper->num_bytes;
		if (sign < 0)
			WARN_ON(qgroup->excl < oper->num_bytes);
		qgroup->excl_cmpr += sign * oper->num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
		}
	}
out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(tmp);
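
/*
 * A minimal stand-alone sketch of the propagation pattern above: apply
 * sign * num_bytes to one group, then walk every ancestor exactly once.
 * The type and the 'seen' flag are hypothetical stand-ins for the ulist,
 * which deduplicates in the same way:
 */
struct demo_qgroup {
	long long rfer;			/* referenced bytes */
	long long excl;			/* exclusive bytes */
	struct demo_qgroup **parents;	/* groups this group is a member of */
	int nr_parents;
	int seen;			/* poor man's ulist deduplication */
};

static void demo_excl_adjust(struct demo_qgroup *qg, int sign,
			     long long num_bytes)
{
	struct demo_qgroup *stack[64];	/* assumes a small hierarchy */
	int top = 0;

	stack[top++] = qg;
	while (top) {
		struct demo_qgroup *cur = stack[--top];
		int i;

		if (cur->seen)
			continue;	/* visit each ancestor only once */
		cur->seen = 1;
		cur->rfer += sign * num_bytes;
		cur->excl += sign * num_bytes;
		for (i = 0; i < cur->nr_parents; i++)
			stack[top++] = cur->parents[i];
	}
}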
/*
 * Walk all of the roots that pointed to our bytenr and adjust their refcnts as
 * needed.
 */
static int qgroup_calc_old_refcnt(struct btrfs_fs_info *fs_info,
				  u64 root_to_skip, struct ulist *tmp,
				  struct ulist *roots, struct ulist *qgroups,
				  u64 seq, int *old_roots, int rescan)
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		/* We don't count our current root here */
		if (unode->val == root_to_skip)
			continue;
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;
		/*
		 * We could have a pending removal of this same ref so we may
		 * not have actually found our ref root when doing
		 * btrfs_find_all_roots, so we need to keep track of how many
		 * old roots we find in case we removed ours and added a
		 * different one at the same time. I don't think this could
		 * happen in practice but that sort of thinking leads to pain
		 * and suffering and to the dark side.
		 */
		(*old_roots)++;

		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);

		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = u64_to_ptr(tmp_unode->aux);
			/*
			 * We use this sequence number to keep from having to
			 * run the whole list and 0 out the refcnt every time.
			 * We basically use sequence as the known 0 count and
			 * then add 1 every time we see a qgroup. This is how
			 * we get how many of the roots actually point up to
			 * the upper level qgroups in order to determine
			 * exclusive counts.
			 *
			 * For rescan we want to set old_refcnt to seq so our
			 * exclusive calculations end up correct.
			 */
			if (rescan)
				qg->old_refcnt = seq;
			else if (qg->old_refcnt < seq)
				qg->old_refcnt = seq + 1;
			else
				qg->old_refcnt++;

			if (qg->new_refcnt < seq)
				qg->new_refcnt = seq + 1;
			else
				qg->new_refcnt++;
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				ret = ulist_add(tmp, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
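
/*
 * The sequence trick above, isolated: instead of zeroing every refcnt
 * between operations, a global seq acts as the implicit zero, so a
 * counter at or below seq reads as 0.  Two hedged helpers showing the
 * idea (names are illustrative, not from this file):
 */
static inline u64 demo_refcnt_read(u64 refcnt, u64 seq)
{
	/* anything at or below seq was never touched this round */
	return refcnt <= seq ? 0 : refcnt - seq;
}

static inline u64 demo_refcnt_bump(u64 refcnt, u64 seq)
{
	/* first touch this round jumps to seq + 1, later touches add 1 */
	return refcnt <= seq ? seq + 1 : refcnt + 1;
}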
/*
 * We need to walk forward in our operation tree and account for any roots that
 * were deleted after we made this operation.
 */
static int qgroup_account_deleted_refs(struct btrfs_fs_info *fs_info,
				       struct btrfs_qgroup_operation *oper,
				       struct ulist *qgroups, u64 seq,
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct btrfs_qgroup_operation *tmp_oper;

	/*
	 * We only walk forward in the tree since we're only interested in
	 * removals that happened _after_ our operation.
	 */
	spin_lock(&fs_info->qgroup_op_lock);
	n = rb_next(&oper->n);
	spin_unlock(&fs_info->qgroup_op_lock);
	if (!n)
		return 0;
	tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
	while (tmp_oper->bytenr == oper->bytenr) {
		/*
		 * If it's not a removal we don't care, additions work out
		 * properly with our refcnt tracking.
		 */
		if (tmp_oper->type != BTRFS_QGROUP_OPER_SUB_SHARED &&
		    tmp_oper->type != BTRFS_QGROUP_OPER_SUB_EXCL)
			goto next;
		qg = find_qgroup_rb(fs_info, tmp_oper->ref_root);
		if (!qg)
			goto next;
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		/*
		 * We only want to increase old_roots if this qgroup is
		 * not already in the list of qgroups. If it is already
		 * there then that means it must have been re-added or
		 * the delete will be discarded because we had an
		 * existing ref that we haven't looked up yet. In this
		 * case we don't want to increase old_roots. So if ret
		 * == 1 then we know that this is the first time we've
		 * seen this qgroup and we can bump the old_roots.
		 */
			ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg),
					GFP_ATOMIC);
next:
		spin_lock(&fs_info->qgroup_op_lock);
		n = rb_next(&tmp_oper->n);
		spin_unlock(&fs_info->qgroup_op_lock);
		if (!n)
			break;
		tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
	}

	/* Ok now process the qgroups we found */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);
		if (qg->old_refcnt < seq)
			qg->old_refcnt = seq + 1;
		else
			qg->old_refcnt++;
		if (qg->new_refcnt < seq)
			qg->new_refcnt = seq + 1;
		else
			qg->new_refcnt++;
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(qgroups, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
1552 static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
1553 struct btrfs_qgroup_operation *oper,
1554 struct btrfs_qgroup *qgroup,
1555 struct ulist *tmp, struct ulist *qgroups,
1558 struct ulist_node *unode;
1559 struct ulist_iterator uiter;
1560 struct btrfs_qgroup *qg;
1564 ret = ulist_add(qgroups, qgroup->qgroupid, ptr_to_u64(qgroup),
1568 ret = ulist_add(tmp, qgroup->qgroupid, ptr_to_u64(qgroup),
1572 ULIST_ITER_INIT(&uiter);
1573 while ((unode = ulist_next(tmp, &uiter))) {
1574 struct btrfs_qgroup_list *glist;
1576 qg = u64_to_ptr(unode->aux);
1577 if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
1578 if (qg->new_refcnt < seq)
1579 qg->new_refcnt = seq + 1;
1583 if (qg->old_refcnt < seq)
1584 qg->old_refcnt = seq + 1;
1588 list_for_each_entry(glist, &qg->groups, next_group) {
1589 ret = ulist_add(tmp, glist->group->qgroupid,
1590 ptr_to_u64(glist->group), GFP_ATOMIC);
1593 ret = ulist_add(qgroups, glist->group->qgroupid,
1594 ptr_to_u64(glist->group), GFP_ATOMIC);
/*
 * This adjusts the counters for all referenced qgroups if need be.
 */
static int qgroup_adjust_counters(struct btrfs_fs_info *fs_info,
				  u64 root_to_skip, u64 num_bytes,
				  struct ulist *qgroups, u64 seq,
				  int old_roots, int new_roots, int rescan)
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		qg = u64_to_ptr(unode->aux);

		/*
		 * Wasn't referenced before but is now, add to the reference
		 * counters.
		 */
		if (qg->old_refcnt <= seq && qg->new_refcnt > seq) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
		}

		/*
		 * Was referenced before but isn't now, subtract from the
		 * reference counters.
		 */
		if (qg->old_refcnt > seq && qg->new_refcnt <= seq) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
		}

		if (qg->old_refcnt < seq)
			cur_old_count = 0;
		else
			cur_old_count = qg->old_refcnt - seq;
		if (qg->new_refcnt < seq)
			cur_new_count = 0;
		else
			cur_new_count = qg->new_refcnt - seq;

		/*
		 * If our refcount was the same as the roots previously but our
		 * new count isn't the same as the number of roots now then we
		 * went from having an exclusive reference on this range to not.
		 */
		if (old_roots && cur_old_count == old_roots &&
		    (cur_new_count != new_roots || new_roots == 0)) {
			WARN_ON(cur_new_count != new_roots && new_roots == 0);
			qg->excl -= num_bytes;
			qg->excl_cmpr -= num_bytes;
		}

		/*
		 * If we didn't reference all the roots before but now we do we
		 * have an exclusive reference to this range.
		 */
		if ((!old_roots || (old_roots && cur_old_count != old_roots))
		    && cur_new_count == new_roots) {
			qg->excl += num_bytes;
			qg->excl_cmpr += num_bytes;
		}

		qgroup_dirty(fs_info, qg);
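
/*
 * The rfer branches above reduce to a was-referenced / is-referenced
 * comparison against seq.  A hedged restatement as a pure function
 * (illustrative only, not called from this file):
 */
static inline long long demo_rfer_delta(u64 old_refcnt, u64 new_refcnt,
					u64 seq, long long num_bytes)
{
	int was_ref = old_refcnt > seq;	/* referenced before this op */
	int is_ref = new_refcnt > seq;	/* referenced after this op */

	if (!was_ref && is_ref)
		return num_bytes;	/* gained a reference */
	if (was_ref && !is_ref)
		return -num_bytes;	/* lost its reference */
	return 0;			/* no change either way */
}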
/*
 * If we removed a data extent and there were other references for that bytenr
 * then we need to lookup all referenced roots to make sure we still don't
 * reference this bytenr. If we do then we can just discard this operation.
 */
static int check_existing_refs(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup_operation *oper)
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
				   oper->elem.seq, &roots);

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		if (unode->val == oper->ref_root) {

	btrfs_put_tree_mod_seq(fs_info, &oper->elem);

/*
 * If we share a reference across multiple roots then we may need to adjust
 * various qgroups referenced and exclusive counters. The basic premise is this
 *
 * 1) We have seq to represent a 0 count. Instead of looping through all of the
 * qgroups and resetting their refcount to 0 we just constantly bump this
 * sequence number to act as the base reference count. This means that if
 * anybody is equal to or below this sequence they were never referenced. We
 * jack this sequence up by the number of roots we found each time in order to
 * make sure we don't have any overlap.
 *
 * 2) We first search all the roots that reference the area _except_ the root
 * we're acting on currently. This makes up the old_refcnt of all the qgroups
 *
 * 3) We walk all of the qgroups referenced by the root we are currently acting
 * on, and will either adjust old_refcnt in the case of a removal or the
 * new_refcnt in the case of an addition.
 *
 * 4) Finally we walk all the qgroups that are referenced by this range
 * including the root we are acting on currently. We will adjust the counters
 * based on the number of roots we had and will have after this operation.
 *
 * Take this example as an illustration
 *
 *			[qgroup 1/0]
 *		     /       |        \
 *		[qg 0/0]  [qg 0/1]  [qg 0/2]
 *		     \       |        /
 *		     [     extent     ]
 *
 * Say we are adding a reference that is covered by qg 0/0. The first step
 * would give a refcnt of 1 to qg 0/1 and 0/2 and a refcnt of 2 to qg 1/0 with
 * old_roots being 2. Because it is adding new_roots will be 1. We then go
 * through qg 0/0 which will get the new_refcnt set to 1 and add 1 to qg 1/0's
 * new_refcnt, bringing it to 3. We then walk through all of the qgroups, we
 * notice that the old refcnt for qg 0/0 < the new refcnt, so we added a
 * reference and thus must add the size to the referenced bytes. Everything
 * else is the same so nothing else changes.
 */
static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_qgroup_operation *oper)
	struct ulist *roots = NULL;
	struct ulist *qgroups, *tmp;
	struct btrfs_qgroup *qgroup;
	struct seq_list elem = {};

	if (oper->elem.seq) {
		ret = check_existing_refs(trans, fs_info, oper);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;
	}

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		return -ENOMEM;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ulist_free(qgroups);
		return -ENOMEM;
	}

	btrfs_get_tree_mod_seq(fs_info, &elem);
	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
				   &roots);
	btrfs_put_tree_mod_seq(fs_info, &elem);
	if (ret < 0) {
		ulist_free(qgroups);
		ulist_free(tmp);
		return ret;
	}
	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, oper->ref_root);
	if (!qgroup)
		goto out;
	seq = fs_info->qgroup_seq;

	/*
	 * So roots is the list of all the roots currently pointing at the
	 * bytenr, including the ref we are adding if we are adding, or not if
	 * we are removing a ref. So we pass in the ref_root to skip that root
	 * in our calculations. We set old_refcnt and new_refcnt because who
	 * the hell knows what everything looked like before, and it doesn't
	 * matter
	 */
	ret = qgroup_calc_old_refcnt(fs_info, oper->ref_root, tmp, roots,
				     qgroups, seq, &old_roots, 0);

	/*
	 * Now adjust the refcounts of the qgroups that care about this
	 * reference, either the old_count in the case of removal or new_count
	 * in the case of an addition.
	 */
	ret = qgroup_calc_new_refcnt(fs_info, oper, qgroup, tmp, qgroups,
				     seq);

	/*
	 * ...in the case of removals. If we had a removal before we got around
	 * to processing this operation then we need to find that guy and count
	 * his references as if they really existed so we don't end up screwing
	 * up the exclusive counts. Then whenever we go to process the delete
	 * everything will be grand and we can account for whatever exclusive
	 * changes need to be made there. We also have to pass in old_roots so
	 * we have an accurate count of the roots as it pertains to this
	 * operations view of the world.
	 */
	ret = qgroup_account_deleted_refs(fs_info, oper, tmp, qgroups, seq,
					  &old_roots);

	/*
	 * We are adding our root, need to adjust up the number of roots,
	 * otherwise old_roots is the number of roots we want.
	 */
	if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
		new_roots = old_roots + 1;
	} else {
		new_roots = old_roots;
	}
	fs_info->qgroup_seq += old_roots + 1;

	/*
	 * And now the magic happens, bless Arne for having a pretty elegant
	 * solution for this.
	 */
	qgroup_adjust_counters(fs_info, oper->ref_root, oper->num_bytes,
			       qgroups, seq, old_roots, new_roots, 0);
out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(qgroups);
	ulist_free(tmp);
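
/*
 * Condensed shape of the routine above, with error handling and locking
 * dropped (a sketch of the call order, not a replacement):
 *
 *	btrfs_find_all_roots(...);           roots pointing at bytenr
 *	qgroup_calc_old_refcnt(...);         old_refcnt + old_roots
 *	qgroup_calc_new_refcnt(...);         old/new refcnt for our root
 *	qgroup_account_deleted_refs(...);    count pending removals too
 *	new_roots = old_roots + (adding ? 1 : 0);
 *	qgroup_adjust_counters(...);         rfer/excl updated from counts
 */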
/*
 * btrfs_qgroup_account is called for every ref that is added to or deleted
 * from the fs. First, all roots referencing the extent are searched, and
 * then the space is accounted accordingly to the different roots. The
 * accounting algorithm works in 3 steps documented inline.
 */
static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				struct btrfs_qgroup_operation *oper)
	if (!fs_info->quota_enabled)
		return 0;

	BUG_ON(!fs_info->quota_root);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= oper->bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return 0;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	ASSERT(is_fstree(oper->ref_root));

	switch (oper->type) {
	case BTRFS_QGROUP_OPER_ADD_EXCL:
	case BTRFS_QGROUP_OPER_SUB_EXCL:
		ret = qgroup_excl_accounting(fs_info, oper);
		break;
	case BTRFS_QGROUP_OPER_ADD_SHARED:
	case BTRFS_QGROUP_OPER_SUB_SHARED:
		ret = qgroup_shared_accounting(trans, fs_info, oper);
		break;
	}

/*
 * Needs to be called every time we run delayed refs, even if there is an error
 * in order to cleanup outstanding operations.
 */
int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info)
	struct btrfs_qgroup_operation *oper;

	while (!list_empty(&trans->qgroup_ref_list)) {
		oper = list_first_entry(&trans->qgroup_ref_list,
					struct btrfs_qgroup_operation, list);
		list_del_init(&oper->list);
		if (!ret || !trans->aborted)
			ret = btrfs_qgroup_account(trans, fs_info, oper);
		spin_lock(&fs_info->qgroup_op_lock);
		rb_erase(&oper->n, &fs_info->qgroup_op_tree);
		spin_unlock(&fs_info->qgroup_op_lock);
		btrfs_put_tree_mod_seq(fs_info, &oper->elem);
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;
	int start_rescan_worker = 0;

	if (!quota_root)
		goto out;

	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	if (!ret && start_rescan_worker) {
		ret = qgroup_rescan_init(fs_info, 0, 1);
		if (!ret) {
			qgroup_rescan_zero_tracking(fs_info);
			btrfs_queue_work(fs_info->qgroup_rescan_workers,
					 &fs_info->qgroup_rescan_work);
		}
	}

out:
	return ret;
/*
 * copy the accounting information between qgroups. This is necessary when a
 * snapshot or a subvolume is created
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
		goto out;

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
			if (!srcgroup) {
				ret = -EINVAL;
				goto out;
			}
			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		ret = update_qgroup_limit_item(trans, quota_root, objectid,
					       inherit->lim.max_rfer,
					       inherit->lim.max_excl,
					       inherit->lim.rsv_rfer,
					       inherit->lim.rsv_excl);
	}

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		srcroot_level = btrfs_header_level(srcroot->node);
		level_size = btrfs_level_size(srcroot, srcroot_level);
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i) {
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret)
				goto out;
			++i_qgroups;
		}
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make sure
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;
		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
				      *i_qgroups);
		if (ret)
			goto unlock;
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
		i_qgroups += 2;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		i_qgroups += 2;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
/*
 * reserve some space for a qgroup and all its parents. The reservation takes
 * place with start_transaction or dealloc_reserve, similar to ENOSPC
 * accounting. If not enough space is available, EDQUOT is returned.
 * We assume that the requested space is new for all qgroups.
 */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups to see if any limits
	 * would be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		    qg->max_rfer) {
			ret = -EDQUOT;
			goto out;
		}

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		    qg->max_excl) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
		}
	}

	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = u64_to_ptr(unode->aux);

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
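
/*
 * The reserve path above is a two-pass check-then-commit: first walk
 * every affected qgroup and bail out with -EDQUOT if any limit would be
 * exceeded, and only then walk them again to record the reservation, so
 * a failure never leaves a partial reservation behind.  A minimal
 * stand-alone sketch over a hypothetical flat array (reusing the
 * demo_qgroup type from the sketch further up, plus an ad-hoc limit):
 */
static int demo_reserve(struct demo_qgroup **qgs, int nr,
			long long num_bytes, long long max_rfer)
{
	int i;

	for (i = 0; i < nr; i++)		/* pass 1: check only */
		if (qgs[i]->rfer + num_bytes > max_rfer)
			return -EDQUOT;
	for (i = 0; i < nr; i++)		/* pass 2: commit */
		qgs[i]->rfer += num_bytes;
	return 0;
}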
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;

	if (!is_fstree(ref_root))
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);

void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	btrfs_err(trans->root->fs_info,
		"qgroups not uptodate in trans handle %p: list is%s empty, "
		"seq is %#x.%x",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
}
/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *qgroups,
		   struct ulist *tmp, struct extent_buffer *scratch_leaf)
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct seq_list tree_mod_seq_elem = {};

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->extent_root->leafsize;
		else
			num_bytes = found.offset;

		ulist_reinit(qgroups);
		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots);

		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		ret = qgroup_calc_old_refcnt(fs_info, 0, tmp, roots, qgroups,
					     seq, &new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			goto out;
		}

		ret = qgroup_adjust_counters(fs_info, 0, num_bytes, qgroups,
					     seq, 0, new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			goto out;
		}
		spin_unlock(&fs_info->qgroup_lock);
	}

out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL, *qgroups = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 qgroups, tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(qgroups);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err == 2 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			   err == 2 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

	complete_all(&fs_info->qgroup_rescan_completion);
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)))
		return -EINVAL;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		ret = -EINPROGRESS;
	else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		ret = -EINVAL;
	if (ret) {
		spin_unlock(&fs_info->qgroup_lock);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto err;
	}

	fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	memset(&fs_info->qgroup_rescan_progress, 0,
	       sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_worker, NULL, NULL);

err:
	btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
	int ret;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);

/*
 * this is only called from open_ctree where we're still single-threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}