/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - copy also limits on subvol creation
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */
/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 */
	u64 tag;
	u64 refcnt;
};
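/*
 * Illustrative example of the state fields above (numbers invented): if
 * the subvolume qgroups 0/257 and 0/258 share a 1 GiB extent and 0/257
 * owns another 512 MiB exclusively, then 0/257 has rfer = 1536 MiB and
 * excl = 512 MiB, while 0/258 has rfer = 1024 MiB and excl = 0.
 */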
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
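/*
 * One btrfs_qgroup_list exists per (member, parent) edge: it is linked
 * into member->groups via next_group and into parent->members via
 * next_member, so each relation can be walked from either end.
 */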
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
	struct btrfs_qgroup_list *list;

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	list_del(&qgroup->dirty);

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
	return 0;
}
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}
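/*
 * Note: add_relation_rb() above allocates its list node with GFP_ATOMIC
 * because its callers hold the qgroup_lock spinlock and must not sleep.
 */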
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}
/*
 * The full config is read in one go; this is only called from open_ctree(),
 * where we are still single-threaded, so no locking is used.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_enabled)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				printk(KERN_ERR
				 "btrfs: old qgroup version, quota disabled\n");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				printk(KERN_ERR
				 "btrfs: qgroup generation mismatch, marked as inconsistent\n");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			printk(KERN_WARNING
				"btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
				(unsigned long long)found_key.objectid,
				(unsigned long long)found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		   ret >= 0) {
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	}
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}
/*
 * Called from close_ctree() and open_ctree(), both single-threaded paths,
 * and from btrfs_quota_disable(). Cleans up the in-memory structures;
 * callers are responsible for any locking.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);

		while (!list_empty(&qgroup->groups)) {
			list = list_first_entry(&qgroup->groups,
						struct btrfs_qgroup_list,
						next_group);
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
		}

		while (!list_empty(&qgroup->members)) {
			list = list_first_entry(&qgroup->members,
						struct btrfs_qgroup_list,
						next_member);
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
		}
		kfree(qgroup);
	}
	/*
	 * we call btrfs_free_qgroup_config() both when unmounting the
	 * filesystem and when disabling quota, so we set qgroup_ulist
	 * to NULL here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}
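/*
 * A relation is stored twice on disk, as (src RELATION dst) and as
 * (dst RELATION src); see btrfs_add_qgroup_relation() below, which
 * inserts both items so the relation can be looked up from either key.
 */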
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root, u64 qgroupid,
				    u64 flags, u64 max_rfer, u64 max_excl,
				    u64 rsv_rfer, u64 rsv_excl)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot,
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot,
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf's items one batch at a time, since the
		 * whole tree is going to be deleted anyway.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	btrfs_free_qgroup_config(fs_info);
	spin_unlock(&fs_info->qgroup_lock);

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
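/*
 * Qgroups on fs_info->dirty_qgroups are written back to the quota tree
 * by btrfs_run_qgroups() at commit time via update_qgroup_info_item().
 */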
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check whether the relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;
	int err;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* first check whether the relation actually exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check that there are no relations to this qgroup */
		if (!list_empty(&qgroup->groups) ||
		    !list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	ret = del_qgroup_item(trans, quota_root, qgroupid);

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}
	ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
				       limit->flags, limit->max_rfer,
				       limit->max_excl, limit->rsv_rfer,
				       limit->rsv_excl);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		printk(KERN_INFO "btrfs: unable to update quota limit for %llu\n",
		       (unsigned long long)qgroupid);
	}

	spin_lock(&fs_info->qgroup_lock);
	qgroup->lim_flags = limit->flags;
	qgroup->max_rfer = limit->max_rfer;
	qgroup->max_excl = limit->max_excl;
	qgroup->rsv_rfer = limit->rsv_rfer;
	qgroup->rsv_excl = limit->rsv_excl;
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
/*
 * btrfs_qgroup_record_ref is called when a ref is added or deleted. It puts
 * the modification into a list that's later used by btrfs_end_transaction to
 * pass the recorded modifications on to btrfs_qgroup_account_ref.
 */
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_node *node,
			    struct btrfs_delayed_extent_op *extent_op)
{
	struct qgroup_update *u;

	BUG_ON(!trans->delayed_ref_elem.seq);
	u = kmalloc(sizeof(*u), GFP_NOFS);
	if (!u)
		return -ENOMEM;

	u->node = node;
	u->extent_op = extent_op;
	list_add_tail(&u->list, &trans->qgroup_ref_list);

	return 0;
}
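/*
 * A minimal sketch of the intended flow (simplified, error handling
 * omitted; the real iteration lives in the delayed-ref/transaction code
 * that the comment above refers to):
 *
 *	btrfs_qgroup_record_ref(trans, node, extent_op);
 *	...
 *	list_for_each_entry(u, &trans->qgroup_ref_list, list)
 *		btrfs_qgroup_account_ref(trans, fs_info, u->node,
 *					 u->extent_op);
 */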
static int qgroup_account_ref_step1(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
						/* XXX id not needed */
		ret = ulist_add(tmp, qg->qgroupid,
				(u64)(uintptr_t)qg, GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
			if (qg->refcnt < seq)
				qg->refcnt = seq + 1;
			else
				++qg->refcnt;

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(u64)(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
static int qgroup_account_ref_step2(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq, int sgn, u64 num_bytes,
				    struct btrfs_qgroup *qgroup)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct btrfs_qgroup_list *glist;
	int ret;

	ulist_reinit(tmp);
	ret = ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
		if (qg->refcnt < seq) {
			/* not visited by step 1 */
			qg->rfer += sgn * num_bytes;
			qg->rfer_cmpr += sgn * num_bytes;
			if (roots->nnodes == 0) {
				qg->excl += sgn * num_bytes;
				qg->excl_cmpr += sgn * num_bytes;
			}
			qgroup_dirty(fs_info, qg);
		}
		WARN_ON(qg->tag >= seq);
		qg->tag = seq;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
static int qgroup_account_ref_step3(struct btrfs_fs_info *fs_info,
				    struct ulist *roots, struct ulist *tmp,
				    u64 seq, int sgn, u64 num_bytes)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
		if (ret < 0)
			return ret;

		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
			if (qg->tag == seq)
				continue;

			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl -= sgn * num_bytes;
				qg->excl_cmpr -= sgn * num_bytes;
				qgroup_dirty(fs_info, qg);
			}

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
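/*
 * Worked example of the seq/refcnt bookkeeping above (illustrative
 * numbers, for a single extent accounted with sgn = +1): say the extent
 * was already referenced by two roots (roots->nnodes == 2) and
 * seq = 100. Step 1 walks up from both old roots; a qgroup reached from
 * both ends with refcnt = 102, one reached from only one root with
 * refcnt = 101. Step 2 walks up from the new root and adds rfer for
 * every qgroup step 1 did not visit (refcnt < seq). Step 3 then removes
 * excl from the old qgroups whose refcnt - seq equals roots->nnodes,
 * i.e. those that used to see all references and now share the extent.
 */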
/*
 * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
 * from the fs. First, all roots referencing the extent are searched, and
 * then the space is accounted accordingly to the different roots. The
 * accounting algorithm works in 3 steps documented inline.
 */
int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_delayed_ref_node *node,
			     struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key ins;
	struct btrfs_root *quota_root;
	u64 ref_root;
	struct btrfs_qgroup *qgroup;
	struct ulist *roots = NULL;
	u64 seq;
	int ret = 0;
	int sgn;

	if (!fs_info->quota_enabled)
		return 0;

	BUG_ON(!fs_info->quota_root);

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		struct btrfs_delayed_tree_ref *ref;
		ref = btrfs_delayed_node_to_tree_ref(node);
		ref_root = ref->root;
	} else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   node->type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_delayed_data_ref *ref;
		ref = btrfs_delayed_node_to_data_ref(node);
		ref_root = ref->root;
	} else {
		BUG();
	}

	if (!is_fstree(ref_root)) {
		/*
		 * non-fs-trees are not being accounted
		 */
		return 0;
	}

	switch (node->action) {
	case BTRFS_ADD_DELAYED_REF:
	case BTRFS_ADD_DELAYED_EXTENT:
		sgn = 1;
		seq = btrfs_tree_mod_seq_prev(node->seq);
		break;
	case BTRFS_DROP_DELAYED_REF:
		sgn = -1;
		seq = node->seq;
		break;
	case BTRFS_UPDATE_DELAYED_HEAD:
		return 0;
	default:
		BUG();
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return 0;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * the delayed ref sequence number we pass depends on the direction of
	 * the operation. for add operations, we pass
	 * btrfs_tree_mod_seq_prev(node->seq) to skip
	 * the delayed ref's current sequence number, because we need the state
	 * of the tree before the add operation. for delete operations, we pass
	 * (node->seq) to include the delayed ref's current sequence number,
	 * because we need the state of the tree after the delete operation.
	 */
	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, seq, &roots);
	if (ret < 0)
		return ret;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto unlock;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto unlock;

	/*
	 * step 1: for each old ref, visit all nodes once and inc refcnt
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	seq = fs_info->qgroup_seq;
	fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

	ret = qgroup_account_ref_step1(fs_info, roots, fs_info->qgroup_ulist,
				       seq);
	if (ret)
		goto unlock;

	/*
	 * step 2: walk from the new root
	 */
	ret = qgroup_account_ref_step2(fs_info, roots, fs_info->qgroup_ulist,
				       seq, sgn, node->num_bytes, qgroup);
	if (ret)
		goto unlock;

	/*
	 * step 3: walk again from old refs
	 */
	ret = qgroup_account_ref_step3(fs_info, roots, fs_info->qgroup_ulist,
				       seq, sgn, node->num_bytes);

unlock:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(roots);

	return ret;
}
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;
	int start_rescan_worker = 0;

	if (!quota_root)
		goto out;

	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	if (!ret && start_rescan_worker) {
		ret = qgroup_rescan_init(fs_info, 0, 1);
		if (!ret) {
			qgroup_rescan_zero_tracking(fs_info);
			btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
					   &fs_info->qgroup_rescan_work);
		}
		ret = 0;
	}

out:
	return ret;
}
/*
 * copy the accounting information between qgroups. This is necessary when a
 * snapshot or a subvolume is created.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
		goto out;

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
			if (!srcgroup) {
				ret = -EINVAL;
				goto out;
			}
			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		ret = update_qgroup_limit_item(trans, quota_root, objectid,
					       inherit->lim.flags,
					       inherit->lim.max_rfer,
					       inherit->lim.max_excl,
					       inherit->lim.rsv_rfer,
					       inherit->lim.rsv_excl);
		if (ret)
			goto out;
	}

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;
		int srcroot_level;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		rcu_read_lock();
		srcroot_level = btrfs_header_level(srcroot->node);
		level_size = btrfs_level_size(srcroot, srcroot_level);
		rcu_read_unlock();
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i) {
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret)
				goto out;
			++i_qgroups;
		}
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;
		dstgroup->rfer = srcgroup->rfer - level_size;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;
		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
				      *i_qgroups);
		if (ret)
			goto unlock;
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
		i_qgroups += 2;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		i_qgroups += 2;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
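/*
 * Why the level_size adjustment above: right after a snapshot is taken,
 * source and snapshot share everything except their respective root
 * nodes, each one tree block of level_size bytes. The snapshot therefore
 * starts with the source's rfer minus its own new root block, and the
 * source's only exclusive data is its root block.
 */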
/*
 * reserve some space for a qgroup and all its parents. The reservation takes
 * place with start_transaction or dealloc_reserve, similar to ENOSPC
 * accounting. If not enough space is available, EDQUOT is returned.
 * We assume that the requested space is new for all qgroups.
 */
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		    qg->max_rfer) {
			ret = -EDQUOT;
			goto out;
		}

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		    qg->max_excl) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
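/*
 * Example of the limit check above (illustrative numbers): with
 * max_rfer = 1024 MiB, rfer = 960 MiB and reserved = 64 MiB, any nonzero
 * num_bytes makes reserved + rfer + num_bytes exceed max_rfer, so
 * btrfs_qgroup_reserve() returns -EDQUOT before touching any counter;
 * reservations are recorded only after every affected qgroup passes.
 */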
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	pr_err("btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x\n",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
	BUG();
}
/*
 * returns < 0 on error, 0 when more leaves are to be scanned.
 * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *tmp,
		   struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct seq_list tree_mod_seq_elem = {};
	u64 seq;
	int slot;
	int ret;

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 (unsigned long long)fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 (unsigned long long)fs_info->qgroup_rescan_progress.offset,
		 ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY)
			continue;
		ret = btrfs_find_all_roots(trans, fs_info, found.objectid,
					   tree_mod_seq_elem.seq, &roots);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}

		/*
		 * step2 of btrfs_qgroup_account_ref works from a single root,
		 * we're doing all at once here.
		 */
		ulist_reinit(tmp);
		ULIST_ITER_INIT(&uiter);
		while ((unode = ulist_next(roots, &uiter))) {
			struct btrfs_qgroup *qg;

			qg = find_qgroup_rb(fs_info, unode->val);
			if (!qg)
				continue;

			ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg,
					GFP_ATOMIC);
			if (ret < 0) {
				spin_unlock(&fs_info->qgroup_lock);
				ulist_free(roots);
				goto out;
			}
		}

		/* this loop is similar to step 2 of btrfs_qgroup_account_ref */
		ULIST_ITER_INIT(&uiter);
		while ((unode = ulist_next(tmp, &uiter))) {
			struct btrfs_qgroup *qg;
			struct btrfs_qgroup_list *glist;

			qg = (struct btrfs_qgroup *)(uintptr_t) unode->aux;
			qg->rfer += found.offset;
			qg->rfer_cmpr += found.offset;
			WARN_ON(qg->tag >= seq);
			if (qg->refcnt - seq == roots->nnodes) {
				qg->excl += found.offset;
				qg->excl_cmpr += found.offset;
			}
			qgroup_dirty(fs_info, qg);

			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(tmp, glist->group->qgroupid,
						(uintptr_t)glist->group,
						GFP_ATOMIC);
				if (ret < 0) {
					spin_unlock(&fs_info->qgroup_lock);
					ulist_free(roots);
					goto out;
				}
			}
		}

		spin_unlock(&fs_info->qgroup_lock);
		ulist_free(roots);
		ret = 0;
	}

out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}
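/*
 * Note on found.offset above: for a BTRFS_EXTENT_ITEM_KEY the key offset
 * holds the extent's length in bytes, which is why it is added directly
 * to the rfer/excl counters during the rescan.
 */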
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(tmp);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err == 2 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (err >= 0) {
		pr_info("btrfs: qgroup scan completed%s\n",
			err == 2 ? " (inconsistency flag cleared)" : "");
	} else {
		pr_err("btrfs: qgroup scan failed with %d\n", err);
	}

	complete_all(&fs_info->qgroup_rescan_completion);
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}

		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	fs_info->qgroup_rescan_work.func = btrfs_qgroup_rescan_worker;

	if (ret) {
err:
		pr_info("btrfs: qgroup_rescan_init failed with %d\n", ret);
		return ret;
	}

	return 0;
}
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
			   &fs_info->qgroup_rescan_work);

	return 0;
}
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);

	return ret;
}
/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
				   &fs_info->qgroup_rescan_work);
}