 * Copyright (C) 2011 STRATO. All rights reserved.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include "transaction.h"
 * - subvol delete -> delete when ref goes to 0? delete limits also?
 * - copy also limits on subvol creation
 * - caches for ulists
 * - performance benchmarks
 * - check all ioctl parameters
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */
	u64 lim_flags;	/* which limits are set */
	 * reservation tracking
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */
	 * temp variables for accounting operations
 * glue structure to represent the relations between qgroups.
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
		else if (qgroup->qgroupid > qgroupid)
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
		else if (qgroup->qgroupid > qgroupid)
	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
		return ERR_PTR(-ENOMEM);
	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);
	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
	struct btrfs_qgroup_list *list;
	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;
	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;
	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	if (!fs_info->quota_enabled)
	path = btrfs_alloc_path();
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;
	 * pass 1: read status, all qgroup infos and limits
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
		struct btrfs_qgroup *qgroup;
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;
			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);
			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
			       "btrfs: old qgroup version, quota disabled\n");
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				       "btrfs: qgroup generation mismatch, "
				       "marked as inconsistent\n");
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
			/* FIXME read scan element */
		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		qgroup = add_qgroup_rb(fs_info, found_key.offset);
		if (IS_ERR(qgroup)) {
			ret = PTR_ERR(qgroup);
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;
			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;
			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
		ret = btrfs_next_item(quota_root, path);
	btrfs_release_path(path);
	 * pass 2: read all qgroup relations
	key.type = BTRFS_QGROUP_RELATION_KEY;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
		ret = add_relation_rb(fs_info, found_key.objectid,
		if (ret == -ENOENT) {
			       "btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
			       (unsigned long long)found_key.objectid,
			       (unsigned long long)found_key.offset);
			ret = 0;	/* ignore the error */
		ret = btrfs_next_item(quota_root, path);
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
 * This is only called from close_ctree() or open_ctree(), both in single-
 * threaded paths. Clean up the in-memory structures. No locking needed.
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		while (!list_empty(&qgroup->groups)) {
			list = list_first_entry(&qgroup->groups,
						struct btrfs_qgroup_list,
			list_del(&list->next_group);
			list_del(&list->next_member);
		while (!list_empty(&qgroup->members)) {
			list = list_first_entry(&qgroup->members,
						struct btrfs_qgroup_list,
			list_del(&list->next_group);
			list_del(&list->next_member);
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
	struct btrfs_path *path;
	struct btrfs_key key;
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_RELATION_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
	struct btrfs_path *path;
	struct btrfs_key key;
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_RELATION_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	ret = btrfs_del_item(trans, quota_root, path);
	btrfs_free_path(path);
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
	struct btrfs_path *path;
	struct btrfs_key key;
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	ret = btrfs_del_item(trans, quota_root, path);
	btrfs_release_path(path);
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	ret = btrfs_del_item(trans, quota_root, path);
	btrfs_free_path(path);
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root, u64 qgroupid,
				    u64 flags, u64 max_rfer, u64 max_excl,
				    u64 rsv_rfer, u64 rsv_excl)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroupid;
	path = btrfs_alloc_path();
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);
	btrfs_mark_buffer_dirty(l);
	btrfs_free_path(path);
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;
	path = btrfs_alloc_path();
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
	btrfs_mark_buffer_dirty(l);
	btrfs_free_path(path);
static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	path = btrfs_alloc_path();
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_mark_buffer_dirty(l);
	btrfs_free_path(path);
 * called with qgroup_lock held
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	path = btrfs_alloc_path();
	path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		 * delete the leaf one by one
		 * since the whole tree is going
		ret = btrfs_del_items(trans, root, path, 0, nr);
		btrfs_release_path(path);
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	spin_lock(&fs_info->qgroup_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		spin_unlock(&fs_info->qgroup_lock);
	spin_unlock(&fs_info->qgroup_lock);
	 * initially create the quota tree
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
	path = btrfs_alloc_path();
	key.type = BTRFS_QGROUP_STATUS_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_scan(leaf, ptr, 0);
	btrfs_mark_buffer_dirty(leaf);
	key.type = BTRFS_ROOT_REF_KEY;
	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
			spin_lock(&fs_info->qgroup_lock);
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				spin_unlock(&fs_info->qgroup_lock);
				ret = PTR_ERR(qgroup);
			spin_unlock(&fs_info->qgroup_lock);
		ret = btrfs_next_item(tree_root, path);
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		spin_unlock(&fs_info->qgroup_lock);
		ret = PTR_ERR(qgroup);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
	btrfs_free_path(path);
	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root) {
		spin_unlock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	btrfs_free_qgroup_config(fs_info);
	spin_unlock(&fs_info->qgroup_lock);
	ret = btrfs_clean_quota_tree(trans, quota_root);
	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	list_del(&quota_root->dirty_list);
	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_quota_rescan(struct btrfs_fs_info *fs_info)
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
	struct btrfs_root *quota_root;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
		del_qgroup_relation_item(trans, quota_root, src, dst);
	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
	struct btrfs_root *quota_root;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	ret = add_qgroup_item(trans, quota_root, qgroupid);
	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
		ret = PTR_ERR(qgroup);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	/* check if there are no relations to this qgroup */
	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, qgroupid);
		if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) {
			spin_unlock(&fs_info->qgroup_lock);
	spin_unlock(&fs_info->qgroup_lock);
	ret = del_qgroup_item(trans, quota_root, qgroupid);
	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
				       limit->flags, limit->max_rfer,
				       limit->max_excl, limit->rsv_rfer,
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		printk(KERN_INFO "unable to update quota limit for %llu\n",
		       (unsigned long long)qgroupid);
	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	qgroup->lim_flags = limit->flags;
	qgroup->max_rfer = limit->max_rfer;
	qgroup->max_excl = limit->max_excl;
	qgroup->rsv_rfer = limit->rsv_rfer;
	qgroup->rsv_excl = limit->rsv_excl;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
 * btrfs_qgroup_record_ref is called when a ref is added or deleted. It puts
 * the modification into a list that's later used by btrfs_end_transaction to
 * pass the recorded modifications on to btrfs_qgroup_account_ref.
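/*
 * Rough sketch of the intended flow (illustrative only; the actual call
 * sites live in the delayed-ref and transaction code, not in this file):
 *
 *	delayed-ref insertion
 *		-> btrfs_qgroup_record_ref(trans, node, extent_op);
 *	...
 *	btrfs_end_transaction()
 *		-> for each entry queued on trans->qgroup_ref_list:
 *			btrfs_qgroup_account_ref(trans, fs_info, node,
 *						 extent_op);
 */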
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_node *node,
			    struct btrfs_delayed_extent_op *extent_op)
	struct qgroup_update *u;
	BUG_ON(!trans->delayed_ref_elem.seq);
	u = kmalloc(sizeof(*u), GFP_NOFS);
	u->extent_op = extent_op;
	list_add_tail(&u->list, &trans->qgroup_ref_list);
 * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
 * from the fs. First, all roots referencing the extent are searched, and
 * the space is then accounted to the different roots accordingly. The
 * accounting algorithm works in 3 steps documented inline.
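/*
 * Worked example (illustrative, not part of the original code): an extent of
 * num_bytes is already referenced by roots A and B, and a new ref from root C
 * is added (sgn > 0, roots = {A, B}).
 *
 * step 1 visits the qgroups of the old roots A and B (and their parents) and
 *	  bumps their refcnt into the current seq window.
 * step 2 walks upwards from C's qgroup; every qgroup not visited in step 1
 *	  gains rfer, but not excl, because roots->nnodes != 0 and the extent
 *	  is therefore shared.
 * step 3 walks the old roots again; qgroups whose refcnt shows that all old
 *	  roots reach them lose excl, since the extent is no longer exclusive
 *	  to them.
 */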
int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_delayed_ref_node *node,
			     struct btrfs_delayed_extent_op *extent_op)
	struct btrfs_key ins;
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist *roots = NULL;
	struct ulist *tmp = NULL;
	struct ulist_iterator uiter;
	if (!fs_info->quota_enabled)
	BUG_ON(!fs_info->quota_root);
	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		struct btrfs_delayed_tree_ref *ref;
		ref = btrfs_delayed_node_to_tree_ref(node);
		ref_root = ref->root;
	} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   node->type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_delayed_data_ref *ref;
		ref = btrfs_delayed_node_to_data_ref(node);
		ref_root = ref->root;
	if (!is_fstree(ref_root)) {
		 * non-fs-trees are not being accounted
	switch (node->action) {
	case BTRFS_ADD_DELAYED_REF:
	case BTRFS_ADD_DELAYED_EXTENT:
	case BTRFS_DROP_DELAYED_REF:
	case BTRFS_UPDATE_DELAYED_HEAD:
	 * the delayed ref sequence number we pass depends on the direction of
	 * the operation. for add operations, we pass (node->seq - 1) to skip
	 * the delayed ref's current sequence number, because we need the state
	 * of the tree before the add operation. for delete operations, we pass
	 * (node->seq) to include the delayed ref's current sequence number,
	 * because we need the state of the tree after the delete operation.
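	/*
	 * For instance (illustrative numbers): with node->seq == 100, an add
	 * passes 99 to btrfs_find_all_roots() and sees the roots as they were
	 * before this ref existed, while a delete passes 100 and still sees
	 * the ref that is about to go away.
	 */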
	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
				   sgn > 0 ? node->seq - 1 : node->seq, &roots);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, ref_root);
	 * step 1: for each old ref, visit all nodes once and inc refcnt
	tmp = ulist_alloc(GFP_ATOMIC);
	seq = fs_info->qgroup_seq;
	fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		struct ulist_node *tmp_unode;
		struct ulist_iterator tmp_uiter;
		struct btrfs_qgroup *qg;
		qg = find_qgroup_rb(fs_info, unode->val);
		/* XXX id not needed */
		ulist_add(tmp, qg->qgroupid, (u64)(uintptr_t)qg, GFP_ATOMIC);
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;
			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
			if (qg->refcnt < seq)
				qg->refcnt = seq + 1;
			list_for_each_entry(glist, &qg->groups, next_group) {
				ulist_add(tmp, glist->group->qgroupid,
					  (u64)(uintptr_t)glist->group,
	 * step 2: walk from the new root
	ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;
		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
		if (qg->refcnt < seq) {
			/* not visited by step 1 */
			qg->rfer += sgn * node->num_bytes;
			qg->rfer_cmpr += sgn * node->num_bytes;
			if (roots->nnodes == 0) {
				qg->excl += sgn * node->num_bytes;
				qg->excl_cmpr += sgn * node->num_bytes;
			qgroup_dirty(fs_info, qg);
		WARN_ON(qg->tag >= seq);
		list_for_each_entry(glist, &qg->groups, next_group) {
			ulist_add(tmp, glist->group->qgroupid,
				  (uintptr_t)glist->group, GFP_ATOMIC);
	 * step 3: walk again from old refs
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		struct btrfs_qgroup *qg;
		struct ulist_node *tmp_unode;
		struct ulist_iterator tmp_uiter;
		qg = find_qgroup_rb(fs_info, unode->val);
		ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;
			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl -= sgn * node->num_bytes;
				qg->excl_cmpr -= sgn * node->num_bytes;
				qgroup_dirty(fs_info, qg);
			list_for_each_entry(glist, &qg->groups, next_group) {
				ulist_add(tmp, glist->group->qgroupid,
					  (uintptr_t)glist->group,
	spin_unlock(&fs_info->qgroup_lock);
 * called from commit_transaction. Writes all changed qgroups to disk.
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
	struct btrfs_root *quota_root = fs_info->quota_root;
	fs_info->quota_enabled = fs_info->pending_quota_state;
	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);
	ret = update_qgroup_status_item(trans, fs_info, quota_root);
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 * copy the accounting information between qgroups. This is necessary when a
 * snapshot or a subvolume is created
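/*
 * For illustration (the numbers are made up): snapshotting a subvolume whose
 * level-0 qgroup has rfer == excl == 1 GiB, with a tree level_size of 16 KiB,
 * leaves the new qgroup with rfer = 1 GiB - 16 KiB and drops the source's
 * excl to 16 KiB; both trees now share all blocks except their respective
 * root nodes.
 */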
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
	 * create a tracking group for the subvol itself
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		ret = update_qgroup_limit_item(trans, quota_root, objectid,
					       inherit->lim.max_rfer,
					       inherit->lim.max_excl,
					       inherit->lim.rsv_rfer,
					       inherit->lim.rsv_excl);
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;
		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
		srcroot_level = btrfs_header_level(srcroot->node);
		level_size = btrfs_level_size(srcroot, srcroot_level);
	 * add qgroup to all inherited groups
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i) {
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
	spin_lock(&fs_info->qgroup_lock);
	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		srcgroup = find_qgroup_rb(fs_info, srcid);
		dstgroup->rfer = srcgroup->rfer - level_size;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;
		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;
		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;
		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 * reserve some space for a qgroup and all its parents. The reservation takes
 * place with start_transaction or dealloc_reserve, similar to ENOSPC
 * accounting. If not enough space is available, EDQUOT is returned.
 * We assume that the requested space is new for all qgroups.
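/*
 * Worked example (illustrative values): with max_rfer = 1 GiB set on some
 * qgroup in the hierarchy, rfer = 900 MiB already accounted and
 * reserved = 100 MiB outstanding, a further reservation of 64 MiB makes
 * reserved + rfer + num_bytes exceed max_rfer, so the request fails with
 * -EDQUOT and nothing is charged to any qgroup.
 */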
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	struct ulist *ulist = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	if (!is_fstree(ref_root))
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, ref_root);
	 * in a first step, we check all affected qgroups if any limits would
	ulist = ulist_alloc(GFP_ATOMIC);
	ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;
		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + qg->rfer + num_bytes >
		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + qg->excl + num_bytes >
		list_for_each_entry(glist, &qg->groups, next_group) {
			ulist_add(ulist, glist->group->qgroupid,
				  (uintptr_t)glist->group, GFP_ATOMIC);
	 * no limits exceeded, now record the reservation into all qgroups
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
		qg->reserved += num_bytes;
	spin_unlock(&fs_info->qgroup_lock);
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist *ulist = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	if (!is_fstree(ref_root))
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	qgroup = find_qgroup_rb(fs_info, ref_root);
	ulist = ulist_alloc(GFP_ATOMIC);
		btrfs_std_error(fs_info, -ENOMEM);
	ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;
		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
		qg->reserved -= num_bytes;
		list_for_each_entry(glist, &qg->groups, next_group) {
			ulist_add(ulist, glist->group->qgroupid,
				  (uintptr_t)glist->group, GFP_ATOMIC);
	spin_unlock(&fs_info->qgroup_lock);
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
	printk(KERN_ERR "btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %llu\n",
	       trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
	       trans->delayed_ref_elem.seq);