 * Copyright (C) 2011 STRATO. All rights reserved.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include "transaction.h"
#include "extent_io.h"
 * - subvol delete -> delete when ref goes to 0? delete limits also?
 * - copy also limits on subvol creation
 * - caches for ulists
 * - performance benchmarks
 * - check all ioctl parameters
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */
	u64 lim_flags;	/* which limits are set */
	 * reservation tracking
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;	  /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */
	 * temp variables for accounting operations
 * glue structure to represent the relations between qgroups.
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
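/*
 * Note on the two helpers above: the ulists used throughout the
 * accounting code store a qgroup pointer in the u64 ->aux field of
 * each node, e.g.
 *
 *	ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
 *	...
 *	qg = u64_to_ptr(unode->aux);
 *
 * The round trip through uintptr_t keeps the conversion well defined
 * on 32 bit targets, where pointers are narrower than u64.
 */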
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
		else if (qgroup->qgroupid > qgroupid)
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
		else if (qgroup->qgroupid > qgroupid)
	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
		return ERR_PTR(-ENOMEM);
	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);
	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
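/*
 * The kzalloc() above uses GFP_ATOMIC because add_qgroup_rb() runs
 * with the qgroup_lock spinlock held and therefore must not sleep.
 */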
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
	struct btrfs_qgroup_list *list;
	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;
	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);
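/*
 * A single btrfs_qgroup_list node represents one member<->parent edge:
 * it is linked into member->groups via ->next_group and into
 * parent->members via ->next_member, so either endpoint can walk its
 * relations without any extra lookup.
 */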
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;
	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
	struct btrfs_qgroup *qgroup;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup->rfer != rfer || qgroup->excl != excl)
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	u64 rescan_progress = 0;
	if (!fs_info->quota_enabled)
	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
	path = btrfs_alloc_path();
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;
	 * pass 1: read status, all qgroup infos and limits
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
		struct btrfs_qgroup *qgroup;
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;
			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);
			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				  "old qgroup version, quota disabled");
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				  "qgroup generation mismatch, "
				  "marked as inconsistent");
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		qgroup = add_qgroup_rb(fs_info, found_key.offset);
		if (IS_ERR(qgroup)) {
			ret = PTR_ERR(qgroup);
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;
			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;
			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
		ret = btrfs_next_item(quota_root, path);
	btrfs_release_path(path);
	 * pass 2: read all qgroup relations
	key.type = BTRFS_QGROUP_RELATION_KEY;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
		ret = add_relation_rb(fs_info, found_key.objectid,
		if (ret == -ENOENT) {
				  "orphan qgroup relation 0x%llx->0x%llx",
				  found_key.objectid, found_key.offset);
			ret = 0; /* ignore the error */
		ret = btrfs_next_item(quota_root, path);
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	btrfs_free_path(path);
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	return ret < 0 ? ret : 0;
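/*
 * Quota tree layout recap for the two passes above: QGROUP_INFO and
 * QGROUP_LIMIT items carry the qgroupid in key.offset (hence the
 * find_qgroup_rb(fs_info, found_key.offset) lookups), while a
 * QGROUP_RELATION item stores the two ends of an edge in key.objectid
 * and key.offset. Relations are written in both directions, so pass 2
 * only needs the member -> parent half to rebuild the configuration.
 */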
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * first two are in single-threaded paths. And for the third one, we have set
 * quota_root to be null with qgroup_lock held before, so it is safe to clean
 * up the in-memory structures without qgroup_lock held.
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
	struct btrfs_qgroup *qgroup;
	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	 * we call btrfs_free_qgroup_config() when unmounting the
	 * filesystem and when disabling quota, so we set qgroup_ulist
	 * to be null here to avoid a double free.
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
	struct btrfs_path *path;
	struct btrfs_key key;
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_RELATION_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
	struct btrfs_path *path;
	struct btrfs_key key;
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_RELATION_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	ret = btrfs_del_item(trans, quota_root, path);
	btrfs_free_path(path);
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	if (btrfs_test_is_dummy_root(quota_root))
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
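/*
 * Each qgroup is thus backed by two items in the quota tree: a
 * QGROUP_INFO item holding the tracked counters and a QGROUP_LIMIT
 * item holding the configured limits, both keyed by the qgroupid in
 * key.offset.
 */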
static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
	struct btrfs_path *path;
	struct btrfs_key key;
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	ret = btrfs_del_item(trans, quota_root, path);
	btrfs_release_path(path);
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	ret = btrfs_del_item(trans, quota_root, path);
	btrfs_free_path(path);
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_qgroup *qgroup)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;
	path = btrfs_alloc_path();
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
	btrfs_mark_buffer_dirty(l);
	btrfs_free_path(path);
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	if (btrfs_test_is_dummy_root(root))
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;
	path = btrfs_alloc_path();
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
	btrfs_mark_buffer_dirty(l);
	btrfs_free_path(path);
static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	path = btrfs_alloc_path();
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
			fs_info->qgroup_rescan_progress.objectid);
	btrfs_mark_buffer_dirty(l);
	btrfs_free_path(path);
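/*
 * The three update_qgroup_*_item() helpers above share one pattern:
 * btrfs_search_slot() with ins_len 0 and cow 1 locates and COWs the
 * leaf, the item is rewritten in place through the btrfs_set_qgroup_*
 * accessors, and btrfs_mark_buffer_dirty() schedules the leaf for
 * writeback.
 */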
 * called with qgroup_lock held
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	path = btrfs_alloc_path();
	path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		 * delete the leaves one by one
		 * since the whole tree is going
		ret = btrfs_del_items(trans, root, path, 0, nr);
		btrfs_release_path(path);
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
	 * initially create the quota tree
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_STATUS_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
	btrfs_mark_buffer_dirty(leaf);
	key.type = BTRFS_ROOT_REF_KEY;
	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
		ret = btrfs_next_item(tree_root, path);
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
	btrfs_free_path(path);
	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);
	btrfs_free_qgroup_config(fs_info);
	ret = btrfs_clean_quota_tree(trans, quota_root);
	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	list_del(&quota_root->dirty_list);
	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
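/*
 * Qgroups queued on the dirty list here are written back by
 * btrfs_run_qgroups() at transaction commit time, via
 * update_qgroup_info_item() and update_qgroup_limit_item().
 */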
 * The easy accounting: if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 * Caller should hold fs_info->qgroup_lock.
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    u64 num_bytes, int sign)
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	qgroup = find_qgroup_rb(fs_info, ref_root);
	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;
	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;
		qgroup->reserved -= num_bytes;
	qgroup_dirty(fs_info, qgroup);
	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = u64_to_ptr(unode->aux);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
			qgroup->reserved -= num_bytes;
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);
		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
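/*
 * Worked example: dropping the only reference to a 16K extent owned by
 * qgroup 0/257 reaches this function with sign == -1, so 0/257 and
 * every qgroup above it in the hierarchy lose 16K from both their rfer
 * and excl counters.
 */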
 * Quick path for updating qgroup with only excl refs.
 * In that case, just updating all parents will be enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 * Return 0 for quick update, return >0 when a full rescan is needed
 * and the INCONSISTENT flag has been set.
 * Return < 0 for other errors.
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
	struct btrfs_qgroup *qgroup;
	qgroup = find_qgroup_rb(fs_info, src);
	if (qgroup->excl == qgroup->rfer) {
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup->excl, sign);
	fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
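/*
 * The quick path applies only when qgroup->excl == qgroup->rfer, i.e.
 * everything the source group references is exclusively owned by it.
 * Only then can the parents be adjusted by the excl delta via
 * __qgroup_excl_accounting(); in every other case the relation change
 * can alter sharing in ways that require a full rescan.
 */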
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	tmp = ulist_alloc(GFP_NOFS);
	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
	/* first check whether such a qgroup relation exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
		del_qgroup_relation_item(trans, quota_root, src, dst);
	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
		spin_unlock(&fs_info->qgroup_lock);
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int __del_qgroup_relation(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 src, u64 dst)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	tmp = ulist_alloc(GFP_NOFS);
	quota_root = fs_info->quota_root;
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
	/* first check whether such a qgroup relation exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
	spin_unlock(&fs_info->qgroup_lock);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, fs_info, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	ret = add_qgroup_item(trans, quota_root, qgroupid);
	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
		ret = PTR_ERR(qgroup);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	/* check that this qgroup has no children */
	if (!list_empty(&qgroup->members)) {
	ret = del_qgroup_item(trans, quota_root, qgroupid);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, fs_info,
					    list->group->qgroupid);
	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)
		qgroup->max_rfer = limit->max_rfer;
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
		qgroup->max_excl = limit->max_excl;
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)
		qgroup->rsv_rfer = limit->rsv_rfer;
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)
		qgroup->rsv_excl = limit->rsv_excl;
	qgroup->lim_flags |= limit->flags;
	spin_unlock(&fs_info->qgroup_lock);
	ret = update_qgroup_limit_item(trans, quota_root, qgroup);
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
			   struct btrfs_qgroup_operation *oper2)
	 * Ignore seq and type here, we're looking for any operation
	 * at all related to this extent on that root.
	if (oper1->bytenr < oper2->bytenr)
	if (oper1->bytenr > oper2->bytenr)
	if (oper1->ref_root < oper2->ref_root)
	if (oper1->ref_root > oper2->ref_root)
static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
	struct btrfs_qgroup_operation *cur;
	spin_lock(&fs_info->qgroup_op_lock);
	n = fs_info->qgroup_op_tree.rb_node;
		cur = rb_entry(n, struct btrfs_qgroup_operation, n);
		cmp = comp_oper_exist(cur, oper);
			spin_unlock(&fs_info->qgroup_op_lock);
	spin_unlock(&fs_info->qgroup_op_lock);
static int comp_oper(struct btrfs_qgroup_operation *oper1,
		     struct btrfs_qgroup_operation *oper2)
	if (oper1->bytenr < oper2->bytenr)
	if (oper1->bytenr > oper2->bytenr)
	if (oper1->ref_root < oper2->ref_root)
	if (oper1->ref_root > oper2->ref_root)
	if (oper1->seq < oper2->seq)
	if (oper1->seq > oper2->seq)
	if (oper1->type < oper2->type)
	if (oper1->type > oper2->type)
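/*
 * comp_oper() defines the total order of the qgroup_op_tree:
 * (bytenr, ref_root, seq, type). comp_oper_exist() compares only the
 * first two fields, which is exactly what qgroup_oper_exists() needs
 * to detect any pending operation on a given bytenr/root pair.
 */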
static int insert_qgroup_oper(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
	struct rb_node *parent = NULL;
	struct btrfs_qgroup_operation *cur;
	spin_lock(&fs_info->qgroup_op_lock);
	p = &fs_info->qgroup_op_tree.rb_node;
		cur = rb_entry(parent, struct btrfs_qgroup_operation, n);
		cmp = comp_oper(cur, oper);
			p = &(*p)->rb_right;
			spin_unlock(&fs_info->qgroup_op_lock);
	rb_link_node(&oper->n, parent, p);
	rb_insert_color(&oper->n, &fs_info->qgroup_op_tree);
	spin_unlock(&fs_info->qgroup_op_lock);
 * Record a quota operation for processing later on.
 * @trans: the transaction we are adding the delayed op to.
 * @fs_info: the fs_info for this fs.
 * @ref_root: the root of the reference we are acting on.
 * @bytenr: the bytenr we are acting on.
 * @num_bytes: the number of bytes in the reference.
 * @type: the type of operation this is.
 * @mod_seq: do we need to get a sequence number for looking up roots.
 * We just add it to our trans qgroup_ref_list and carry on and process these
 * operations in order at some later point. If the reference root isn't a fs
 * root then we don't bother with doing anything.
 * MUST BE HOLDING THE REF LOCK.
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info, u64 ref_root,
			    u64 bytenr, u64 num_bytes,
			    enum btrfs_qgroup_operation_type type, int mod_seq)
	struct btrfs_qgroup_operation *oper;
	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
	oper = kmalloc(sizeof(*oper), GFP_NOFS);
	oper->ref_root = ref_root;
	oper->bytenr = bytenr;
	oper->num_bytes = num_bytes;
	oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
	INIT_LIST_HEAD(&oper->elem.list);
	trace_btrfs_qgroup_record_ref(oper);
	if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
		 * If any operation for this bytenr/ref_root combo
		 * exists, then we know it's not exclusively owned and
		 * shouldn't be queued up.
		 * This also catches the case where we have a cloned
		 * extent that gets queued up multiple times during
		if (qgroup_oper_exists(fs_info, oper)) {
	ret = insert_qgroup_oper(fs_info, oper);
		/* Shouldn't happen so have an assert for developers */
	list_add_tail(&oper->list, &trans->qgroup_ref_list);
		btrfs_get_tree_mod_seq(fs_info, &oper->elem);
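/*
 * Hypothetical caller sketch (the real call sites live in the extent
 * allocation paths, not here): adding a reference to a shared data
 * extent would queue an operation roughly like
 *
 *	btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
 *				bytenr, num_bytes,
 *				BTRFS_QGROUP_OPER_ADD_SHARED, 0);
 *
 * btrfs_delayed_qgroup_accounting() below then drains the list when
 * the delayed refs are run.
 */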
static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_qgroup_operation *oper)
	tmp = ulist_alloc(GFP_NOFS);
	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
	switch (oper->type) {
	case BTRFS_QGROUP_OPER_ADD_EXCL:
	case BTRFS_QGROUP_OPER_SUB_EXCL:
	ret = __qgroup_excl_accounting(fs_info, tmp, oper->ref_root,
				       oper->num_bytes, sign);
	spin_unlock(&fs_info->qgroup_lock);
 * Walk all of the roots that pointed to our bytenr and adjust their refcnts as
static int qgroup_calc_old_refcnt(struct btrfs_fs_info *fs_info,
				  u64 root_to_skip, struct ulist *tmp,
				  struct ulist *roots, struct ulist *qgroups,
				  u64 seq, int *old_roots, int rescan)
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		/* We don't count our current root here */
		if (unode->val == root_to_skip)
		qg = find_qgroup_rb(fs_info, unode->val);
		 * We could have a pending removal of this same ref so we may
		 * not have actually found our ref root when doing
		 * btrfs_find_all_roots, so we need to keep track of how many
		 * old roots we find in case we removed ours and added a
		 * different one at the same time. I don't think this could
		 * happen in practice but that sort of thinking leads to pain
		 * and suffering and to the dark side.
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;
			qg = u64_to_ptr(tmp_unode->aux);
			 * We use this sequence number to keep from having to
			 * run the whole list and 0 out the refcnt every time.
			 * We basically use sequence as the known 0 count and
			 * then add 1 every time we see a qgroup. This is how we
			 * get how many of the roots actually point up to the
			 * upper level qgroups in order to determine exclusive
			 * For rescan we want to set old_refcnt to seq so our
			 * exclusive calculations end up correct.
				qg->old_refcnt = seq;
			else if (qg->old_refcnt < seq)
				qg->old_refcnt = seq + 1;
				if (qg->new_refcnt < seq)
					qg->new_refcnt = seq + 1;
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						ptr_to_u64(glist->group),
				ret = ulist_add(tmp, glist->group->qgroupid,
						ptr_to_u64(glist->group),
 * We need to walk forward in our operation tree and account for any roots that
 * were deleted after we made this operation.
static int qgroup_account_deleted_refs(struct btrfs_fs_info *fs_info,
				       struct btrfs_qgroup_operation *oper,
				       struct ulist *qgroups, u64 seq,
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct btrfs_qgroup_operation *tmp_oper;
	 * We only walk forward in the tree since we're only interested in
	 * removals that happened _after_ our operation.
	spin_lock(&fs_info->qgroup_op_lock);
	n = rb_next(&oper->n);
	spin_unlock(&fs_info->qgroup_op_lock);
	tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
	while (tmp_oper->bytenr == oper->bytenr) {
		 * If it's not a removal we don't care, additions work out
		 * properly with our refcnt tracking.
		if (tmp_oper->type != BTRFS_QGROUP_OPER_SUB_SHARED &&
		    tmp_oper->type != BTRFS_QGROUP_OPER_SUB_EXCL)
		qg = find_qgroup_rb(fs_info, tmp_oper->ref_root);
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
		 * We only want to increase old_roots if this qgroup is
		 * not already in the list of qgroups. If it is already
		 * there then that means it must have been re-added or
		 * the delete will be discarded because we had an
		 * existing ref that we haven't looked up yet. In this
		 * case we don't want to increase old_roots. So if ret
		 * == 1 then we know that this is the first time we've
		 * seen this qgroup and we can bump the old_roots.
		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg),
		spin_lock(&fs_info->qgroup_op_lock);
		n = rb_next(&tmp_oper->n);
		spin_unlock(&fs_info->qgroup_op_lock);
		tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
	/* Ok now process the qgroups we found */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		struct btrfs_qgroup_list *glist;
		qg = u64_to_ptr(unode->aux);
		if (qg->old_refcnt < seq)
			qg->old_refcnt = seq + 1;
			if (qg->new_refcnt < seq)
				qg->new_refcnt = seq + 1;
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(qgroups, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
/* Add refcnt for the newly added reference. */
static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
				  struct btrfs_qgroup_operation *oper,
				  struct btrfs_qgroup *qgroup,
				  struct ulist *tmp, struct ulist *qgroups,
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	ret = ulist_add(qgroups, qgroup->qgroupid, ptr_to_u64(qgroup),
	ret = ulist_add(tmp, qgroup->qgroupid, ptr_to_u64(qgroup),
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		struct btrfs_qgroup_list *glist;
		qg = u64_to_ptr(unode->aux);
		if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
			if (qg->new_refcnt < seq)
				qg->new_refcnt = seq + 1;
			if (qg->old_refcnt < seq)
				qg->old_refcnt = seq + 1;
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			ret = ulist_add(qgroups, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
 * This adjusts the counters for all referenced qgroups if need be.
static int qgroup_adjust_counters(struct btrfs_fs_info *fs_info,
				  u64 root_to_skip, u64 num_bytes,
				  struct ulist *qgroups, u64 seq,
				  int old_roots, int new_roots, int rescan)
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		qg = u64_to_ptr(unode->aux);
		 * Wasn't referenced before but is now, add to the reference
		if (qg->old_refcnt <= seq && qg->new_refcnt > seq) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
		 * Was referenced before but isn't now, subtract from the
		 * reference counters.
		if (qg->old_refcnt > seq && qg->new_refcnt <= seq) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
		if (qg->old_refcnt < seq)
		cur_old_count = qg->old_refcnt - seq;
		if (qg->new_refcnt < seq)
		cur_new_count = qg->new_refcnt - seq;
		 * If our refcount was the same as the roots previously but our
		 * new count isn't the same as the number of roots now then we
		 * went from having an exclusive reference on this range to not.
		if (old_roots && cur_old_count == old_roots &&
		    (cur_new_count != new_roots || new_roots == 0)) {
			WARN_ON(cur_new_count != new_roots && new_roots == 0);
			qg->excl -= num_bytes;
			qg->excl_cmpr -= num_bytes;
		 * If we didn't reference all the roots before but now we do we
		 * have an exclusive reference to this range.
		if ((!old_roots || (old_roots && cur_old_count != old_roots))
		    && cur_new_count == new_roots) {
			qg->excl += num_bytes;
			qg->excl_cmpr += num_bytes;
		qgroup_dirty(fs_info, qg);
 * If we removed a data extent and there were other references for that bytenr
 * then we need to lookup all referenced roots to make sure we still don't
 * reference this bytenr. If we do then we can just discard this operation.
static int check_existing_refs(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup_operation *oper)
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
				   oper->elem.seq, &roots);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		if (unode->val == oper->ref_root) {
	btrfs_put_tree_mod_seq(fs_info, &oper->elem);
 * If we share a reference across multiple roots then we may need to adjust
 * various qgroups' referenced and exclusive counters. The basic premise is this
 * 1) We have seq to represent a 0 count. Instead of looping through all of the
 * qgroups and resetting their refcount to 0 we just constantly bump this
 * sequence number to act as the base reference count. This means that if
 * anybody is equal to or below this sequence they were never referenced. We
 * jack this sequence up by the number of roots we found each time in order to
 * make sure we don't have any overlap.
 * 2) We first search all the roots that reference the area _except_ the root
 * we're acting on currently. This makes up the old_refcnt of all the qgroups
 * 3) We walk all of the qgroups referenced by the root we are currently acting
 * on, and will either adjust old_refcnt in the case of a removal or the
 * new_refcnt in the case of an addition.
 * 4) Finally we walk all the qgroups that are referenced by this range
 * including the root we are acting on currently. We will adjust the counters
 * based on the number of roots we had and will have after this operation.
 * Take this example as an illustration
 *			[qg 0/0]   [qg 0/1]   [qg 0/2]
 * Say we are adding a reference that is covered by qg 0/0. The first step
 * would give a refcnt of 1 to qg 0/1 and 0/2 and a refcnt of 2 to qg 1/0 with
 * old_roots being 2. Because it is adding new_roots will be 1. We then go
 * through qg 0/0 which will get the new_refcnt set to 1 and add 1 to qg 1/0's
 * new_refcnt, bringing it to 3. We then walk through all of the qgroups, we
 * notice that the old refcnt for qg 0/0 < the new refcnt, so we added a
 * reference and thus must add the size to the referenced bytes. Everything
 * else is the same so nothing else changes.
static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_qgroup_operation *oper)
	struct ulist *roots = NULL;
	struct ulist *qgroups, *tmp;
	struct btrfs_qgroup *qgroup;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	if (oper->elem.seq) {
		ret = check_existing_refs(trans, fs_info, oper);
	qgroups = ulist_alloc(GFP_NOFS);
	tmp = ulist_alloc(GFP_NOFS);
		ulist_free(qgroups);
	btrfs_get_tree_mod_seq(fs_info, &elem);
	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
	btrfs_put_tree_mod_seq(fs_info, &elem);
		ulist_free(qgroups);
	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, oper->ref_root);
	seq = fs_info->qgroup_seq;
	 * So roots is the list of all the roots currently pointing at the
	 * bytenr, including the ref we are adding if we are adding, or not if
	 * we are removing a ref. So we pass in the ref_root to skip that root
	 * in our calculations. We set old_refcnt and new_refcnt because who
	 * knows what everything looked like before, and it doesn't matter
	ret = qgroup_calc_old_refcnt(fs_info, oper->ref_root, tmp, roots, qgroups,
				     seq, &old_roots, 0);
	 * Now adjust the refcounts of the qgroups that care about this
	 * reference, either the old_count in the case of removal or new_count
	 * in the case of an addition.
	ret = qgroup_calc_new_refcnt(fs_info, oper, qgroup, tmp, qgroups,
	 * ...in the case of removals. If we had a removal before we got around
	 * to processing this operation then we need to find that guy and count
	 * his references as if they really existed so we don't end up screwing
	 * up the exclusive counts. Then whenever we go to process the delete
	 * everything will be grand and we can account for whatever exclusive
	 * changes need to be made there. We also have to pass in old_roots so
	 * we have an accurate count of the roots as it pertains to this
	 * operation's view of the world.
	ret = qgroup_account_deleted_refs(fs_info, oper, tmp, qgroups, seq,
	 * We are adding our root, need to adjust up the number of roots,
	 * otherwise old_roots is the number of roots we want.
	if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
		new_roots = old_roots + 1;
		new_roots = old_roots;
	fs_info->qgroup_seq += old_roots + 1;
	 * And now the magic happens, bless Arne for having a pretty elegant
	 * solution for this.
	qgroup_adjust_counters(fs_info, oper->ref_root, oper->num_bytes,
			       qgroups, seq, old_roots, new_roots, 0);
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(qgroups);
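/*
 * Numeric example of the sequence trick: with qgroup_seq at 100 and
 * three old roots found, the first hit on a qgroup sets its refcnt to
 * 101 and further hits push it higher, so the largest handed out is
 * 103. qgroup_seq then advances by old_roots + 1 to 104, putting every
 * refcnt at or below the new baseline, i.e. "never referenced", for
 * the next operation.
 */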
 * Process a reference to a shared subtree. This type of operation is
 * queued during snapshot removal when we encounter extents which are
 * shared between more than one root.
static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup_operation *oper)
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup_list *glist;
	struct ulist *parents;
	struct btrfs_qgroup *qg;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	parents = ulist_alloc(GFP_NOFS);
	btrfs_get_tree_mod_seq(fs_info, &elem);
	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
	btrfs_put_tree_mod_seq(fs_info, &elem);
	if (roots->nnodes != 1)
	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
	 * If we find our ref root then that means all refs
	 * this extent has to the root have not yet been
	 * deleted. In that case, we do nothing and let the
	 * last ref for this bytenr drive our update.
	 * This can happen for example if an extent is
	 * referenced multiple times in a snapshot (clone,
	 * etc). If we are in the middle of snapshot removal,
	 * queued updates for such an extent will find the
	 * root if we have not yet finished removing the
	if (unode->val == oper->ref_root)
	root_obj = unode->val;
	spin_lock(&fs_info->qgroup_lock);
	qg = find_qgroup_rb(fs_info, root_obj);
	qg->excl += oper->num_bytes;
	qg->excl_cmpr += oper->num_bytes;
	qgroup_dirty(fs_info, qg);
	 * Adjust counts for parent groups. First we find all
	 * parents, then in the 2nd loop we do the adjustment
	 * while adding parents of the parents to our ulist.
	list_for_each_entry(glist, &qg->groups, next_group) {
		err = ulist_add(parents, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(parents, &uiter))) {
		qg = u64_to_ptr(unode->aux);
		qg->excl += oper->num_bytes;
		qg->excl_cmpr += oper->num_bytes;
		qgroup_dirty(fs_info, qg);
		/* Add any parents of the parents */
		list_for_each_entry(glist, &qg->groups, next_group) {
			err = ulist_add(parents, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(parents);
 * btrfs_qgroup_account is called for every ref that is added to or deleted
 * from the fs. First, all roots referencing the extent are searched, and
 * then the space is accounted accordingly to the different roots. The
 * accounting algorithm works in 3 steps documented inline.
static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				struct btrfs_qgroup_operation *oper)
	if (!fs_info->quota_enabled)
	BUG_ON(!fs_info->quota_root);
	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= oper->bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);
	ASSERT(is_fstree(oper->ref_root));
	trace_btrfs_qgroup_account(oper);
	switch (oper->type) {
	case BTRFS_QGROUP_OPER_ADD_EXCL:
	case BTRFS_QGROUP_OPER_SUB_EXCL:
		ret = qgroup_excl_accounting(fs_info, oper);
	case BTRFS_QGROUP_OPER_ADD_SHARED:
	case BTRFS_QGROUP_OPER_SUB_SHARED:
		ret = qgroup_shared_accounting(trans, fs_info, oper);
	case BTRFS_QGROUP_OPER_SUB_SUBTREE:
		ret = qgroup_subtree_accounting(trans, fs_info, oper);
 * Needs to be called every time we run delayed refs, even if there is an
 * error, in order to clean up outstanding operations.
int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info)
	struct btrfs_qgroup_operation *oper;
	while (!list_empty(&trans->qgroup_ref_list)) {
		oper = list_first_entry(&trans->qgroup_ref_list,
					struct btrfs_qgroup_operation, list);
		list_del_init(&oper->list);
		if (!ret || !trans->aborted)
			ret = btrfs_qgroup_account(trans, fs_info, oper);
		spin_lock(&fs_info->qgroup_op_lock);
		rb_erase(&oper->n, &fs_info->qgroup_op_tree);
		spin_unlock(&fs_info->qgroup_op_lock);
		btrfs_put_tree_mod_seq(fs_info, &oper->elem);
 * called from commit_transaction. Writes all changed qgroups to disk.
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
	struct btrfs_root *quota_root = fs_info->quota_root;
	int start_rescan_worker = 0;
	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;
	fs_info->quota_enabled = fs_info->pending_quota_state;
	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, quota_root, qgroup);
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);
	ret = update_qgroup_status_item(trans, fs_info, quota_root);
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	if (!ret && start_rescan_worker) {
		ret = qgroup_rescan_init(fs_info, 0, 1);
			qgroup_rescan_zero_tracking(fs_info);
			btrfs_queue_work(fs_info->qgroup_rescan_workers,
					 &fs_info->qgroup_rescan_work);
 * copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
	i_qgroups = (u64 *)(inherit + 1);
	nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
	       2 * inherit->num_excl_copies;
	for (i = 0; i < nums; ++i) {
		srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
		if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
	 * create a tracking group for the subvol itself
	ret = add_qgroup_item(trans, quota_root, objectid);
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;
		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
		level_size = srcroot->nodesize;
	 * add qgroup to all inherited groups
	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_qgroup_relation_item(trans, quota_root,
					       objectid, *i_qgroups);
		ret = add_qgroup_relation_item(trans, quota_root,
					       *i_qgroups, objectid);
	spin_lock(&fs_info->qgroup_lock);
	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;
		ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info, "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
	srcgroup = find_qgroup_rb(fs_info, srcid);
	 * We call inherit after we clone the root in order to make sure
	 * our counts don't go crazy, so at this point the only
	 * difference between the two roots should be the root node.
	dstgroup->rfer = srcgroup->rfer;
	dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
	dstgroup->excl = level_size;
	dstgroup->excl_cmpr = level_size;
	srcgroup->excl = level_size;
	srcgroup->excl_cmpr = level_size;
	/* inherit the limit info */
	dstgroup->lim_flags = srcgroup->lim_flags;
	dstgroup->max_rfer = srcgroup->max_rfer;
	dstgroup->max_excl = srcgroup->max_excl;
	dstgroup->rsv_rfer = srcgroup->rsv_rfer;
	dstgroup->rsv_excl = srcgroup->rsv_excl;
	qgroup_dirty(fs_info, dstgroup);
	qgroup_dirty(fs_info, srcgroup);
	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;
		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;
		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	if (!is_fstree(ref_root))
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, ref_root);
	 * in a first step, we check all affected qgroups to see if any limits would
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;
		qg = u64_to_ptr(unode->aux);
		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
	 * no limits exceeded, now record the reservation into all qgroups
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		qg = u64_to_ptr(unode->aux);
		qg->reserved += num_bytes;
	spin_unlock(&fs_info->qgroup_lock);
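/*
 * Worked example of the limit check above: with max_rfer at 1M, rfer
 * at 768K and 128K already reserved, a new 256K reservation is refused
 * because 128K + 768K + 256K > 1M; the same test runs for every parent
 * group the walk reaches, so the strictest ancestor wins.
 */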

void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
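
/*
 * btrfs_qgroup_reserve() and btrfs_qgroup_free() are meant to bracket an
 * operation that may allocate space. A hedged caller-side sketch, where
 * do_allocation() is a made-up placeholder rather than a function in this
 * file:
 *
 *	ret = btrfs_qgroup_reserve(root, num_bytes);
 *	if (ret)
 *		return ret;	-- e.g. -EDQUOT, a limit would be exceeded
 *	ret = do_allocation(root, num_bytes);
 *	if (ret)
 *		btrfs_qgroup_free(root, num_bytes);
 *
 * The reservation only lives in memory (qg->reserved); the on-disk
 * rfer/excl numbers are updated later through the regular extent
 * accounting.
 */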

void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	btrfs_err(trans->root->fs_info,
		"qgroups not uptodate in trans handle %p: list is%s empty, "
		"seq is %#x.%x",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
	BUG();
}
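
/*
 * The "%#x.%x" above prints the 64-bit sequence number as two 32-bit
 * halves, high word first. A quick worked example (illustrative value):
 *
 *	u64 seq = 0x0000000100000002ULL;
 *	(u32)(seq >> 32) == 0x1, (u32)seq == 0x2
 *
 * so the message would end in "seq is 0x1.2".
 */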

/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *qgroups,
		   struct ulist *tmp, struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	u64 num_bytes;
	u64 seq;
	int new_roots;
	int slot;
	int ret;

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->extent_root->nodesize;
		else
			num_bytes = found.offset;

		ulist_reinit(qgroups);
		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		new_roots = 0;
		ret = qgroup_calc_old_refcnt(fs_info, 0, tmp, roots, qgroups,
					     seq, &new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}

		ret = qgroup_adjust_counters(fs_info, 0, num_bytes, qgroups,
					     seq, 0, new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}
		spin_unlock(&fs_info->qgroup_lock);
		ulist_free(roots);
	}
out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}
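
/*
 * qgroup_rescan_leaf() is one instance of the usual btrfs cursor pattern
 * for walking a whole tree across many transactions: look up the saved
 * progress key, bump the progress to the last key of the found leaf plus
 * one, then process a private copy of the leaf with all tree locks
 * dropped. Reduced to a sketch (simplified, error handling omitted):
 *
 *	ret = btrfs_search_slot_for_read(root, &progress, path, 1, 0);
 *	if (ret)
 *		return ret;	-- nothing beyond progress: done (ret == 1)
 *	btrfs_item_key_to_cpu(path->nodes[0], &last,
 *			      btrfs_header_nritems(path->nodes[0]) - 1);
 *	progress.objectid = last.objectid + 1;
 *	(work on a scratch copy of path->nodes[0], locks released)
 *	return 0;		-- caller commits and calls again
 *
 * Working on scratch_leaf is what allows taking fs_info->qgroup_lock per
 * extent item without pinning the extent tree leaf the whole time.
 */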

static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL, *qgroups = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 qgroups, tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(qgroups);
	ulist_free(tmp);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update the status item, since the previous part has already
	 * updated the qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		btrfs_err(fs_info,
			  "failed to start transaction for status update: %d",
			  err);
		goto done;
	}
	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
	if (ret < 0) {
		err = ret;
		btrfs_err(fs_info, "failed to update qgroup status: %d", err);
	}
	btrfs_end_transaction(trans, fs_info->quota_root);

	if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			   err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

done:
	complete_all(&fs_info->qgroup_rescan_completion);
}

/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}

		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);

	return 0;

err:
	btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
	return ret;
}
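
/*
 * qgroup_rescan_init() is used in two modes. With init_flags set, a fresh
 * rescan is requested and the RESCAN status flag is claimed here; with
 * init_flags == 0, a rescan that was already flagged on disk is merely
 * re-armed. A sketch of the two kinds of call sites (the mount-time one is
 * paraphrased from the config-reading code, which is not part of this
 * excerpt):
 *
 *	ret = qgroup_rescan_init(fs_info, 0, 1);
 *		-- new scan from the rescan ioctl, start at objectid 0
 *	ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
 *		-- resume at mount time from the on-disk status item
 */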

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, a concurrent btrfs_qgroup_account_ref may already be
	 * past its call to btrfs_find_all_roots, in which case it would
	 * still do the accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}

int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
			&fs_info->qgroup_rescan_completion);

	return ret;
}
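
/*
 * A hedged sketch of how the rescan entry points compose, roughly as an
 * ioctl handler might use them (error handling trimmed):
 *
 *	ret = btrfs_qgroup_rescan(fs_info);	-- kicks off the worker
 *	if (!ret)
 *		ret = btrfs_qgroup_wait_for_completion(fs_info);
 *
 * Since the wait is interruptible, a signal only aborts the waiter; the
 * rescan worker keeps running and clears the RESCAN flag on its own when
 * it finishes.
 */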

/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}