2 * Copyright (C) 2011 STRATO. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/rbtree.h>
24 #include <linux/slab.h>
25 #include <linux/workqueue.h>
26 #include <linux/btrfs.h>
29 #include "transaction.h"
34 #include "extent_io.h"
37 * - subvol delete -> delete when ref goes to 0? delete limits also?
41 * - copy also limits on subvol creation
43 * - caches for ulists
44 * - performance benchmarks
45 * - check all ioctl parameters
49 * one struct for each qgroup, organized in fs_info->qgroup_tree.
57 u64 rfer; /* referenced */
58 u64 rfer_cmpr; /* referenced compressed */
59 u64 excl; /* exclusive */
60 u64 excl_cmpr; /* exclusive compressed */
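	/*
	 * Worked example (added for illustration, not part of the original
	 * comments): if two subvolumes each reference the same 1 MiB extent,
	 * that extent adds 1 MiB to rfer of both of their qgroups but to excl
	 * of neither; an extent referenced by only one of them counts towards
	 * both its rfer and its excl. The *_cmpr fields track the same
	 * extents by their compressed on-disk size.
	 */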
65 u64 lim_flags; /* which limits are set */
72 * reservation tracking
79 struct list_head groups; /* groups this group is member of */
80 struct list_head members; /* groups that are members of this group */
81 struct list_head dirty; /* dirty groups */
82 struct rb_node node; /* tree of qgroups */
85 * temp variables for accounting operations
92 * glue structure to represent the relations between qgroups.
94 struct btrfs_qgroup_list {
95 struct list_head next_group;
96 struct list_head next_member;
97 struct btrfs_qgroup *group;
98 struct btrfs_qgroup *member;
102 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
104 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
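/*
 * Illustrative sketch (added here, not part of the original file): how the
 * two list heads of struct btrfs_qgroup_list are meant to be walked.
 * Traversing a qgroup's ->groups list via next_group yields all of its
 * parents; traversing ->members via next_member yields all of its children.
 * The helper below only demonstrates that pattern (the same loop appears in
 * btrfs_add_qgroup_relation further down) and assumes the caller already
 * holds the appropriate qgroup lock.
 */
static inline bool qgroup_example_is_member_of(struct btrfs_qgroup *member,
					       struct btrfs_qgroup *parent)
{
	struct btrfs_qgroup_list *list;

	/* each list entry links exactly one member qgroup to one parent */
	list_for_each_entry(list, &member->groups, next_group)
		if (list->group == parent)
			return true;
	return false;
}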
106 /* must be called with qgroup_ioctl_lock held */
107 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
110 struct rb_node *n = fs_info->qgroup_tree.rb_node;
111 struct btrfs_qgroup *qgroup;
114 qgroup = rb_entry(n, struct btrfs_qgroup, node);
115 if (qgroup->qgroupid < qgroupid)
117 else if (qgroup->qgroupid > qgroupid)
125 /* must be called with qgroup_lock held */
126 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
129 struct rb_node **p = &fs_info->qgroup_tree.rb_node;
130 struct rb_node *parent = NULL;
131 struct btrfs_qgroup *qgroup;
135 qgroup = rb_entry(parent, struct btrfs_qgroup, node);
137 if (qgroup->qgroupid < qgroupid)
139 else if (qgroup->qgroupid > qgroupid)
145 qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
147 return ERR_PTR(-ENOMEM);
149 qgroup->qgroupid = qgroupid;
150 INIT_LIST_HEAD(&qgroup->groups);
151 INIT_LIST_HEAD(&qgroup->members);
152 INIT_LIST_HEAD(&qgroup->dirty);
154 rb_link_node(&qgroup->node, parent, p);
155 rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
160 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
162 struct btrfs_qgroup_list *list;
164 list_del(&qgroup->dirty);
165 while (!list_empty(&qgroup->groups)) {
166 list = list_first_entry(&qgroup->groups,
167 struct btrfs_qgroup_list, next_group);
168 list_del(&list->next_group);
169 list_del(&list->next_member);
173 while (!list_empty(&qgroup->members)) {
174 list = list_first_entry(&qgroup->members,
175 struct btrfs_qgroup_list, next_member);
176 list_del(&list->next_group);
177 list_del(&list->next_member);
183 /* must be called with qgroup_lock held */
184 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
186 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
191 rb_erase(&qgroup->node, &fs_info->qgroup_tree);
192 __del_qgroup_rb(qgroup);
196 /* must be called with qgroup_lock held */
197 static int add_relation_rb(struct btrfs_fs_info *fs_info,
198 u64 memberid, u64 parentid)
200 struct btrfs_qgroup *member;
201 struct btrfs_qgroup *parent;
202 struct btrfs_qgroup_list *list;
204 member = find_qgroup_rb(fs_info, memberid);
205 parent = find_qgroup_rb(fs_info, parentid);
206 if (!member || !parent)
209 list = kzalloc(sizeof(*list), GFP_ATOMIC);
213 list->group = parent;
214 list->member = member;
215 list_add_tail(&list->next_group, &member->groups);
216 list_add_tail(&list->next_member, &parent->members);
221 /* must be called with qgroup_lock held */
222 static int del_relation_rb(struct btrfs_fs_info *fs_info,
223 u64 memberid, u64 parentid)
225 struct btrfs_qgroup *member;
226 struct btrfs_qgroup *parent;
227 struct btrfs_qgroup_list *list;
229 member = find_qgroup_rb(fs_info, memberid);
230 parent = find_qgroup_rb(fs_info, parentid);
231 if (!member || !parent)
234 list_for_each_entry(list, &member->groups, next_group) {
235 if (list->group == parent) {
236 list_del(&list->next_group);
237 list_del(&list->next_member);
246 * The full config is read in one go, only called from open_ctree().
247 * It doesn't use any locking, as at this point we're still single-threaded
249 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
251 struct btrfs_key key;
252 struct btrfs_key found_key;
253 struct btrfs_root *quota_root = fs_info->quota_root;
254 struct btrfs_path *path = NULL;
255 struct extent_buffer *l;
259 u64 rescan_progress = 0;
261 if (!fs_info->quota_enabled)
264 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
265 if (!fs_info->qgroup_ulist) {
270 path = btrfs_alloc_path();
276 /* default this to quota off, in case no status key is found */
277 fs_info->qgroup_flags = 0;
280 * pass 1: read status, all qgroup infos and limits
285 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
290 struct btrfs_qgroup *qgroup;
292 slot = path->slots[0];
294 btrfs_item_key_to_cpu(l, &found_key, slot);
296 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
297 struct btrfs_qgroup_status_item *ptr;
299 ptr = btrfs_item_ptr(l, slot,
300 struct btrfs_qgroup_status_item);
302 if (btrfs_qgroup_status_version(l, ptr) !=
303 BTRFS_QGROUP_STATUS_VERSION) {
305 "btrfs: old qgroup version, quota disabled\n");
308 if (btrfs_qgroup_status_generation(l, ptr) !=
309 fs_info->generation) {
310 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
312 "btrfs: qgroup generation mismatch, "
313 "marked as inconsistent\n");
315 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
317 rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
321 if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
322 found_key.type != BTRFS_QGROUP_LIMIT_KEY)
325 qgroup = find_qgroup_rb(fs_info, found_key.offset);
326 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
327 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
328 printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
329 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
332 qgroup = add_qgroup_rb(fs_info, found_key.offset);
333 if (IS_ERR(qgroup)) {
334 ret = PTR_ERR(qgroup);
338 switch (found_key.type) {
339 case BTRFS_QGROUP_INFO_KEY: {
340 struct btrfs_qgroup_info_item *ptr;
342 ptr = btrfs_item_ptr(l, slot,
343 struct btrfs_qgroup_info_item);
344 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
345 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
346 qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
347 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
348 /* generation currently unused */
351 case BTRFS_QGROUP_LIMIT_KEY: {
352 struct btrfs_qgroup_limit_item *ptr;
354 ptr = btrfs_item_ptr(l, slot,
355 struct btrfs_qgroup_limit_item);
356 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
357 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
358 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
359 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
360 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
365 ret = btrfs_next_item(quota_root, path);
371 btrfs_release_path(path);
374 * pass 2: read all qgroup relations
377 key.type = BTRFS_QGROUP_RELATION_KEY;
379 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
383 slot = path->slots[0];
385 btrfs_item_key_to_cpu(l, &found_key, slot);
387 if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
390 if (found_key.objectid > found_key.offset) {
391 /* parent <- member, not needed to build config */
392 /* FIXME should we omit the key completely? */
396 ret = add_relation_rb(fs_info, found_key.objectid,
398 if (ret == -ENOENT) {
400 "btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
401 (unsigned long long)found_key.objectid,
402 (unsigned long long)found_key.offset);
403 ret = 0; /* ignore the error */
408 ret = btrfs_next_item(quota_root, path);
415 fs_info->qgroup_flags |= flags;
416 if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
417 fs_info->quota_enabled = 0;
418 fs_info->pending_quota_state = 0;
419 } else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
421 ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
423 btrfs_free_path(path);
426 ulist_free(fs_info->qgroup_ulist);
427 fs_info->qgroup_ulist = NULL;
428 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
431 return ret < 0 ? ret : 0;
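/*
 * Aside (illustration added here, not part of the original file): as far as
 * can be inferred from the code above, the quota tree items read by the two
 * passes are keyed as follows:
 *
 *   (0,   BTRFS_QGROUP_STATUS_KEY,   0)        global status item
 *   (0,   BTRFS_QGROUP_INFO_KEY,     qgroupid) per-qgroup usage counters
 *   (0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid) per-qgroup limits
 *   (src, BTRFS_QGROUP_RELATION_KEY, dst)      one item per direction of
 *                                              every member<->parent edge
 *
 * A minimal sketch of building the key for one qgroup's info item:
 */
static inline void qgroup_example_info_key(struct btrfs_key *key, u64 qgroupid)
{
	key->objectid = 0;
	key->type = BTRFS_QGROUP_INFO_KEY;
	key->offset = qgroupid;
}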
435 * This is only called from close_ctree() or open_ctree(), both in single-
436 * threaded paths. Clean up the in-memory structures. No locking needed.
438 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
441 struct btrfs_qgroup *qgroup;
443 while ((n = rb_first(&fs_info->qgroup_tree))) {
444 qgroup = rb_entry(n, struct btrfs_qgroup, node);
445 rb_erase(n, &fs_info->qgroup_tree);
446 __del_qgroup_rb(qgroup);
449 * we call btrfs_free_qgroup_config() when unmounting the
450 * filesystem and when disabling quota, so we set qgroup_ulist
451 * to be null here to avoid double free.
453 ulist_free(fs_info->qgroup_ulist);
454 fs_info->qgroup_ulist = NULL;
457 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
458 struct btrfs_root *quota_root,
462 struct btrfs_path *path;
463 struct btrfs_key key;
465 path = btrfs_alloc_path();
470 key.type = BTRFS_QGROUP_RELATION_KEY;
473 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
475 btrfs_mark_buffer_dirty(path->nodes[0]);
477 btrfs_free_path(path);
481 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
482 struct btrfs_root *quota_root,
486 struct btrfs_path *path;
487 struct btrfs_key key;
489 path = btrfs_alloc_path();
494 key.type = BTRFS_QGROUP_RELATION_KEY;
497 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
506 ret = btrfs_del_item(trans, quota_root, path);
508 btrfs_free_path(path);
512 static int add_qgroup_item(struct btrfs_trans_handle *trans,
513 struct btrfs_root *quota_root, u64 qgroupid)
516 struct btrfs_path *path;
517 struct btrfs_qgroup_info_item *qgroup_info;
518 struct btrfs_qgroup_limit_item *qgroup_limit;
519 struct extent_buffer *leaf;
520 struct btrfs_key key;
522 path = btrfs_alloc_path();
527 key.type = BTRFS_QGROUP_INFO_KEY;
528 key.offset = qgroupid;
530 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
531 sizeof(*qgroup_info));
535 leaf = path->nodes[0];
536 qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
537 struct btrfs_qgroup_info_item);
538 btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
539 btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
540 btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
541 btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
542 btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
544 btrfs_mark_buffer_dirty(leaf);
546 btrfs_release_path(path);
548 key.type = BTRFS_QGROUP_LIMIT_KEY;
549 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
550 sizeof(*qgroup_limit));
554 leaf = path->nodes[0];
555 qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
556 struct btrfs_qgroup_limit_item);
557 btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
558 btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
559 btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
560 btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
561 btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
563 btrfs_mark_buffer_dirty(leaf);
567 btrfs_free_path(path);
571 static int del_qgroup_item(struct btrfs_trans_handle *trans,
572 struct btrfs_root *quota_root, u64 qgroupid)
575 struct btrfs_path *path;
576 struct btrfs_key key;
578 path = btrfs_alloc_path();
583 key.type = BTRFS_QGROUP_INFO_KEY;
584 key.offset = qgroupid;
585 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
594 ret = btrfs_del_item(trans, quota_root, path);
598 btrfs_release_path(path);
600 key.type = BTRFS_QGROUP_LIMIT_KEY;
601 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
610 ret = btrfs_del_item(trans, quota_root, path);
613 btrfs_free_path(path);
617 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
618 struct btrfs_root *root, u64 qgroupid,
619 u64 flags, u64 max_rfer, u64 max_excl,
620 u64 rsv_rfer, u64 rsv_excl)
622 struct btrfs_path *path;
623 struct btrfs_key key;
624 struct extent_buffer *l;
625 struct btrfs_qgroup_limit_item *qgroup_limit;
630 key.type = BTRFS_QGROUP_LIMIT_KEY;
631 key.offset = qgroupid;
633 path = btrfs_alloc_path();
637 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
645 slot = path->slots[0];
646 qgroup_limit = btrfs_item_ptr(l, path->slots[0],
647 struct btrfs_qgroup_limit_item);
648 btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
649 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
650 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
651 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
652 btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);
654 btrfs_mark_buffer_dirty(l);
657 btrfs_free_path(path);
661 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
662 struct btrfs_root *root,
663 struct btrfs_qgroup *qgroup)
665 struct btrfs_path *path;
666 struct btrfs_key key;
667 struct extent_buffer *l;
668 struct btrfs_qgroup_info_item *qgroup_info;
673 key.type = BTRFS_QGROUP_INFO_KEY;
674 key.offset = qgroup->qgroupid;
676 path = btrfs_alloc_path();
680 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
688 slot = path->slots[0];
689 qgroup_info = btrfs_item_ptr(l, path->slots[0],
690 struct btrfs_qgroup_info_item);
691 btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
692 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
693 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
694 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
695 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
697 btrfs_mark_buffer_dirty(l);
700 btrfs_free_path(path);
704 static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
705 struct btrfs_fs_info *fs_info,
706 struct btrfs_root *root)
708 struct btrfs_path *path;
709 struct btrfs_key key;
710 struct extent_buffer *l;
711 struct btrfs_qgroup_status_item *ptr;
716 key.type = BTRFS_QGROUP_STATUS_KEY;
719 path = btrfs_alloc_path();
723 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
731 slot = path->slots[0];
732 ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
733 btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
734 btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
735 btrfs_set_qgroup_status_rescan(l, ptr,
736 fs_info->qgroup_rescan_progress.objectid);
738 btrfs_mark_buffer_dirty(l);
741 btrfs_free_path(path);
746 * called with qgroup_lock held
748 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
749 struct btrfs_root *root)
751 struct btrfs_path *path;
752 struct btrfs_key key;
753 struct extent_buffer *leaf = NULL;
757 path = btrfs_alloc_path();
761 path->leave_spinning = 1;
768 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
771 leaf = path->nodes[0];
772 nr = btrfs_header_nritems(leaf);
776 * delete the leaves one by one
777 * since the whole tree is going
781 ret = btrfs_del_items(trans, root, path, 0, nr);
785 btrfs_release_path(path);
789 root->fs_info->pending_quota_state = 0;
790 btrfs_free_path(path);
794 int btrfs_quota_enable(struct btrfs_trans_handle *trans,
795 struct btrfs_fs_info *fs_info)
797 struct btrfs_root *quota_root;
798 struct btrfs_root *tree_root = fs_info->tree_root;
799 struct btrfs_path *path = NULL;
800 struct btrfs_qgroup_status_item *ptr;
801 struct extent_buffer *leaf;
802 struct btrfs_key key;
803 struct btrfs_key found_key;
804 struct btrfs_qgroup *qgroup = NULL;
808 mutex_lock(&fs_info->qgroup_ioctl_lock);
809 if (fs_info->quota_root) {
810 fs_info->pending_quota_state = 1;
814 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
815 if (!fs_info->qgroup_ulist) {
821 * initially create the quota tree
823 quota_root = btrfs_create_tree(trans, fs_info,
824 BTRFS_QUOTA_TREE_OBJECTID);
825 if (IS_ERR(quota_root)) {
826 ret = PTR_ERR(quota_root);
830 path = btrfs_alloc_path();
837 key.type = BTRFS_QGROUP_STATUS_KEY;
840 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
845 leaf = path->nodes[0];
846 ptr = btrfs_item_ptr(leaf, path->slots[0],
847 struct btrfs_qgroup_status_item);
848 btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
849 btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
850 fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
851 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
852 btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
853 btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
855 btrfs_mark_buffer_dirty(leaf);
858 key.type = BTRFS_ROOT_REF_KEY;
861 btrfs_release_path(path);
862 ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
870 slot = path->slots[0];
871 leaf = path->nodes[0];
872 btrfs_item_key_to_cpu(leaf, &found_key, slot);
874 if (found_key.type == BTRFS_ROOT_REF_KEY) {
875 ret = add_qgroup_item(trans, quota_root,
880 qgroup = add_qgroup_rb(fs_info, found_key.offset);
881 if (IS_ERR(qgroup)) {
882 ret = PTR_ERR(qgroup);
886 ret = btrfs_next_item(tree_root, path);
894 btrfs_release_path(path);
895 ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
899 qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
900 if (IS_ERR(qgroup)) {
901 ret = PTR_ERR(qgroup);
904 spin_lock(&fs_info->qgroup_lock);
905 fs_info->quota_root = quota_root;
906 fs_info->pending_quota_state = 1;
907 spin_unlock(&fs_info->qgroup_lock);
909 btrfs_free_path(path);
912 free_extent_buffer(quota_root->node);
913 free_extent_buffer(quota_root->commit_root);
918 ulist_free(fs_info->qgroup_ulist);
919 fs_info->qgroup_ulist = NULL;
921 mutex_unlock(&fs_info->qgroup_ioctl_lock);
925 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
926 struct btrfs_fs_info *fs_info)
928 struct btrfs_root *tree_root = fs_info->tree_root;
929 struct btrfs_root *quota_root;
932 mutex_lock(&fs_info->qgroup_ioctl_lock);
933 if (!fs_info->quota_root)
935 spin_lock(&fs_info->qgroup_lock);
936 fs_info->quota_enabled = 0;
937 fs_info->pending_quota_state = 0;
938 quota_root = fs_info->quota_root;
939 fs_info->quota_root = NULL;
940 btrfs_free_qgroup_config(fs_info);
941 spin_unlock(&fs_info->qgroup_lock);
948 ret = btrfs_clean_quota_tree(trans, quota_root);
952 ret = btrfs_del_root(trans, tree_root, "a_root->root_key);
956 list_del("a_root->dirty_list);
958 btrfs_tree_lock(quota_root->node);
959 clean_tree_block(trans, tree_root, quota_root->node);
960 btrfs_tree_unlock(quota_root->node);
961 btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
963 free_extent_buffer(quota_root->node);
964 free_extent_buffer(quota_root->commit_root);
967 mutex_unlock(&fs_info->qgroup_ioctl_lock);
971 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
972 struct btrfs_qgroup *qgroup)
974 if (list_empty(&qgroup->dirty))
975 list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
978 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
979 struct btrfs_fs_info *fs_info, u64 src, u64 dst)
981 struct btrfs_root *quota_root;
982 struct btrfs_qgroup *parent;
983 struct btrfs_qgroup *member;
984 struct btrfs_qgroup_list *list;
987 mutex_lock(&fs_info->qgroup_ioctl_lock);
988 quota_root = fs_info->quota_root;
993 member = find_qgroup_rb(fs_info, src);
994 parent = find_qgroup_rb(fs_info, dst);
995 if (!member || !parent) {
1000 /* check if such a qgroup relation already exists */
1001 list_for_each_entry(list, &member->groups, next_group) {
1002 if (list->group == parent) {
1008 ret = add_qgroup_relation_item(trans, quota_root, src, dst);
1012 ret = add_qgroup_relation_item(trans, quota_root, dst, src);
1014 del_qgroup_relation_item(trans, quota_root, src, dst);
1018 spin_lock(&fs_info->qgroup_lock);
1019 ret = add_relation_rb(quota_root->fs_info, src, dst);
1020 spin_unlock(&fs_info->qgroup_lock);
1022 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1026 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
1027 struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1029 struct btrfs_root *quota_root;
1030 struct btrfs_qgroup *parent;
1031 struct btrfs_qgroup *member;
1032 struct btrfs_qgroup_list *list;
1036 mutex_lock(&fs_info->qgroup_ioctl_lock);
1037 quota_root = fs_info->quota_root;
1043 member = find_qgroup_rb(fs_info, src);
1044 parent = find_qgroup_rb(fs_info, dst);
1045 if (!member || !parent) {
1050 /* check if such a qgroup relation already exists */
1051 list_for_each_entry(list, &member->groups, next_group) {
1052 if (list->group == parent)
1058 ret = del_qgroup_relation_item(trans, quota_root, src, dst);
1059 err = del_qgroup_relation_item(trans, quota_root, dst, src);
1063 spin_lock(&fs_info->qgroup_lock);
1064 del_relation_rb(fs_info, src, dst);
1065 spin_unlock(&fs_info->qgroup_lock);
1067 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1071 int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
1072 struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
1074 struct btrfs_root *quota_root;
1075 struct btrfs_qgroup *qgroup;
1078 mutex_lock(&fs_info->qgroup_ioctl_lock);
1079 quota_root = fs_info->quota_root;
1084 qgroup = find_qgroup_rb(fs_info, qgroupid);
1090 ret = add_qgroup_item(trans, quota_root, qgroupid);
1094 spin_lock(&fs_info->qgroup_lock);
1095 qgroup = add_qgroup_rb(fs_info, qgroupid);
1096 spin_unlock(&fs_info->qgroup_lock);
1099 ret = PTR_ERR(qgroup);
1101 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1105 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
1106 struct btrfs_fs_info *fs_info, u64 qgroupid)
1108 struct btrfs_root *quota_root;
1109 struct btrfs_qgroup *qgroup;
1112 mutex_lock(&fs_info->qgroup_ioctl_lock);
1113 quota_root = fs_info->quota_root;
1119 qgroup = find_qgroup_rb(fs_info, qgroupid);
1124 /* check that there are no relations to this qgroup */
1125 if (!list_empty(&qgroup->groups) ||
1126 !list_empty(&qgroup->members)) {
1131 ret = del_qgroup_item(trans, quota_root, qgroupid);
1133 spin_lock(&fs_info->qgroup_lock);
1134 del_qgroup_rb(quota_root->fs_info, qgroupid);
1135 spin_unlock(&fs_info->qgroup_lock);
1137 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1141 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1142 struct btrfs_fs_info *fs_info, u64 qgroupid,
1143 struct btrfs_qgroup_limit *limit)
1145 struct btrfs_root *quota_root;
1146 struct btrfs_qgroup *qgroup;
1149 mutex_lock(&fs_info->qgroup_ioctl_lock);
1150 quota_root = fs_info->quota_root;
1156 qgroup = find_qgroup_rb(fs_info, qgroupid);
1161 ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
1162 limit->flags, limit->max_rfer,
1163 limit->max_excl, limit->rsv_rfer,
1166 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1167 printk(KERN_INFO "unable to update quota limit for %llu\n",
1168 (unsigned long long)qgroupid);
1171 spin_lock(&fs_info->qgroup_lock);
1172 qgroup->lim_flags = limit->flags;
1173 qgroup->max_rfer = limit->max_rfer;
1174 qgroup->max_excl = limit->max_excl;
1175 qgroup->rsv_rfer = limit->rsv_rfer;
1176 qgroup->rsv_excl = limit->rsv_excl;
1177 spin_unlock(&fs_info->qgroup_lock);
1179 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1184 * btrfs_qgroup_record_ref is called when the ref is added or deleted. It puts
1185 * the modification into a list that's later used by btrfs_end_transaction to
1186 * pass the recorded modifications on to btrfs_qgroup_account_ref.
1188 int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
1189 struct btrfs_delayed_ref_node *node,
1190 struct btrfs_delayed_extent_op *extent_op)
1192 struct qgroup_update *u;
1194 BUG_ON(!trans->delayed_ref_elem.seq);
1195 u = kmalloc(sizeof(*u), GFP_NOFS);
1200 u->extent_op = extent_op;
1201 list_add_tail(&u->list, &trans->qgroup_ref_list);
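	/*
	 * Illustration (added, not part of the original file): the consumer
	 * side of the list filled in above lives in the transaction code.
	 * Roughly, it drains the list like the sketch below before the
	 * delayed refs are run; only the shape of the loop is shown here,
	 * not the exact surrounding code:
	 *
	 *	struct qgroup_update *u, *tmp;
	 *
	 *	list_for_each_entry_safe(u, tmp, &trans->qgroup_ref_list, list) {
	 *		btrfs_qgroup_account_ref(trans, fs_info, u->node,
	 *					 u->extent_op);
	 *		list_del(&u->list);
	 *		kfree(u);
	 *	}
	 */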
1206 static int qgroup_account_ref_step1(struct btrfs_fs_info *fs_info,
1207 struct ulist *roots, struct ulist *tmp,
1210 struct ulist_node *unode;
1211 struct ulist_iterator uiter;
1212 struct ulist_node *tmp_unode;
1213 struct ulist_iterator tmp_uiter;
1214 struct btrfs_qgroup *qg;
1217 ULIST_ITER_INIT(&uiter);
1218 while ((unode = ulist_next(roots, &uiter))) {
1219 qg = find_qgroup_rb(fs_info, unode->val);
1224 /* XXX id not needed */
1225 ret = ulist_add(tmp, qg->qgroupid,
1226 (u64)(uintptr_t)qg, GFP_ATOMIC);
1229 ULIST_ITER_INIT(&tmp_uiter);
1230 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1231 struct btrfs_qgroup_list *glist;
1233 qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
1234 if (qg->refcnt < seq)
1235 qg->refcnt = seq + 1;
1239 list_for_each_entry(glist, &qg->groups, next_group) {
1240 ret = ulist_add(tmp, glist->group->qgroupid,
1241 (u64)(uintptr_t)glist->group,
1252 static int qgroup_account_ref_step2(struct btrfs_fs_info *fs_info,
1253 struct ulist *roots, struct ulist *tmp,
1254 u64 seq, int sgn, u64 num_bytes,
1255 struct btrfs_qgroup *qgroup)
1257 struct ulist_node *unode;
1258 struct ulist_iterator uiter;
1259 struct btrfs_qgroup *qg;
1260 struct btrfs_qgroup_list *glist;
1264 ret = ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
1268 ULIST_ITER_INIT(&uiter);
1269 while ((unode = ulist_next(tmp, &uiter))) {
1270 qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1271 if (qg->refcnt < seq) {
1272 /* not visited by step 1 */
1273 qg->rfer += sgn * num_bytes;
1274 qg->rfer_cmpr += sgn * num_bytes;
1275 if (roots->nnodes == 0) {
1276 qg->excl += sgn * num_bytes;
1277 qg->excl_cmpr += sgn * num_bytes;
1279 qgroup_dirty(fs_info, qg);
1281 WARN_ON(qg->tag >= seq);
1284 list_for_each_entry(glist, &qg->groups, next_group) {
1285 ret = ulist_add(tmp, glist->group->qgroupid,
1286 (uintptr_t)glist->group, GFP_ATOMIC);
1295 static int qgroup_account_ref_step3(struct btrfs_fs_info *fs_info,
1296 struct ulist *roots, struct ulist *tmp,
1297 u64 seq, int sgn, u64 num_bytes)
1299 struct ulist_node *unode;
1300 struct ulist_iterator uiter;
1301 struct btrfs_qgroup *qg;
1302 struct ulist_node *tmp_unode;
1303 struct ulist_iterator tmp_uiter;
1306 ULIST_ITER_INIT(&uiter);
1307 while ((unode = ulist_next(roots, &uiter))) {
1308 qg = find_qgroup_rb(fs_info, unode->val);
1313 ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
1317 ULIST_ITER_INIT(&tmp_uiter);
1318 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1319 struct btrfs_qgroup_list *glist;
1321 qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
1325 if (qg->refcnt - seq == roots->nnodes) {
1326 qg->excl -= sgn * num_bytes;
1327 qg->excl_cmpr -= sgn * num_bytes;
1328 qgroup_dirty(fs_info, qg);
1331 list_for_each_entry(glist, &qg->groups, next_group) {
1332 ret = ulist_add(tmp, glist->group->qgroupid,
1333 (uintptr_t)glist->group,
1345 * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
1346 * from the fs. First, all roots referencing the extent are searched, and
1347 * then the space is accounted accordingly to the different roots. The
1348 * accounting algorithm works in 3 steps documented inline.
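 *
 * Added illustration (not from the original comment): assume a 16 KiB extent
 * is currently referenced by the qgroups of roots A and B (roots->nnodes ==
 * 2), both children of a parent qgroup P, and a new ref from root C (also
 * under P) is being added (sgn == +1). Step 1 raises the refcnt of A, B and
 * P relative to seq (A and B by one, P by two). Step 2 walks up from C: C was
 * not visited, so its rfer grows by 16 KiB (excl stays, since old roots
 * exist); P was visited, so it is only tagged. Step 3 walks up from A and B
 * again, skips the tagged P, and finds neither A nor B with refcnt - seq ==
 * roots->nnodes, so nothing loses exclusive bytes. Had the extent previously
 * been referenced by A alone, step 3 would have found A with refcnt - seq ==
 * 1 == nnodes and removed 16 KiB from its excl, because the extent is now
 * shared.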
1350 int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
1351 struct btrfs_fs_info *fs_info,
1352 struct btrfs_delayed_ref_node *node,
1353 struct btrfs_delayed_extent_op *extent_op)
1355 struct btrfs_key ins;
1356 struct btrfs_root *quota_root;
1358 struct btrfs_qgroup *qgroup;
1359 struct ulist *roots = NULL;
1364 if (!fs_info->quota_enabled)
1367 BUG_ON(!fs_info->quota_root);
1369 ins.objectid = node->bytenr;
1370 ins.offset = node->num_bytes;
1371 ins.type = BTRFS_EXTENT_ITEM_KEY;
1373 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1374 node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
1375 struct btrfs_delayed_tree_ref *ref;
1376 ref = btrfs_delayed_node_to_tree_ref(node);
1377 ref_root = ref->root;
1378 } else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1379 node->type == BTRFS_SHARED_DATA_REF_KEY) {
1380 struct btrfs_delayed_data_ref *ref;
1381 ref = btrfs_delayed_node_to_data_ref(node);
1382 ref_root = ref->root;
1387 if (!is_fstree(ref_root)) {
1389 * non-fs-trees are not being accounted
1394 switch (node->action) {
1395 case BTRFS_ADD_DELAYED_REF:
1396 case BTRFS_ADD_DELAYED_EXTENT:
1398 seq = btrfs_tree_mod_seq_prev(node->seq);
1400 case BTRFS_DROP_DELAYED_REF:
1404 case BTRFS_UPDATE_DELAYED_HEAD:
1410 mutex_lock(&fs_info->qgroup_rescan_lock);
1411 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
1412 if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
1413 mutex_unlock(&fs_info->qgroup_rescan_lock);
1417 mutex_unlock(&fs_info->qgroup_rescan_lock);
1420 * the delayed ref sequence number we pass depends on the direction of
1421 * the operation. For add operations, we pass
1422 * btrfs_tree_mod_seq_prev(node->seq) to skip
1423 * the delayed ref's current sequence number, because we need the state
1424 * of the tree before the add operation. For delete operations, we pass
1425 * (node->seq) to include the delayed ref's current sequence number,
1426 * because we need the state of the tree after the delete operation.
1428 ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, seq, &roots);
1432 spin_lock(&fs_info->qgroup_lock);
1434 quota_root = fs_info->quota_root;
1438 qgroup = find_qgroup_rb(fs_info, ref_root);
1443 * step 1: for each old ref, visit all nodes once and inc refcnt
1445 ulist_reinit(fs_info->qgroup_ulist);
1446 seq = fs_info->qgroup_seq;
1447 fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
1449 ret = qgroup_account_ref_step1(fs_info, roots, fs_info->qgroup_ulist,
1455 * step 2: walk from the new root
1457 ret = qgroup_account_ref_step2(fs_info, roots, fs_info->qgroup_ulist,
1458 seq, sgn, node->num_bytes, qgroup);
1463 * step 3: walk again from old refs
1465 ret = qgroup_account_ref_step3(fs_info, roots, fs_info->qgroup_ulist,
1466 seq, sgn, node->num_bytes);
1471 spin_unlock(&fs_info->qgroup_lock);
1478 * called from commit_transaction. Writes all changed qgroups to disk.
1480 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
1481 struct btrfs_fs_info *fs_info)
1483 struct btrfs_root *quota_root = fs_info->quota_root;
1485 int start_rescan_worker = 0;
1490 if (!fs_info->quota_enabled && fs_info->pending_quota_state)
1491 start_rescan_worker = 1;
1493 fs_info->quota_enabled = fs_info->pending_quota_state;
1495 spin_lock(&fs_info->qgroup_lock);
1496 while (!list_empty(&fs_info->dirty_qgroups)) {
1497 struct btrfs_qgroup *qgroup;
1498 qgroup = list_first_entry(&fs_info->dirty_qgroups,
1499 struct btrfs_qgroup, dirty);
1500 list_del_init(&qgroup->dirty);
1501 spin_unlock(&fs_info->qgroup_lock);
1502 ret = update_qgroup_info_item(trans, quota_root, qgroup);
1504 fs_info->qgroup_flags |=
1505 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1506 spin_lock(&fs_info->qgroup_lock);
1508 if (fs_info->quota_enabled)
1509 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
1511 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1512 spin_unlock(&fs_info->qgroup_lock);
1514 ret = update_qgroup_status_item(trans, fs_info, quota_root);
1516 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1518 if (!ret && start_rescan_worker) {
1519 ret = qgroup_rescan_init(fs_info, 0, 1);
1521 qgroup_rescan_zero_tracking(fs_info);
1522 btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
1523 &fs_info->qgroup_rescan_work);
1534 * copy the accounting information between qgroups. This is necessary when a
1535 * snapshot or a subvolume is created
1537 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1538 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
1539 struct btrfs_qgroup_inherit *inherit)
1544 struct btrfs_root *quota_root = fs_info->quota_root;
1545 struct btrfs_qgroup *srcgroup;
1546 struct btrfs_qgroup *dstgroup;
1550 mutex_lock(&fs_info->qgroup_ioctl_lock);
1551 if (!fs_info->quota_enabled)
1560 i_qgroups = (u64 *)(inherit + 1);
1561 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
1562 2 * inherit->num_excl_copies;
1563 for (i = 0; i < nums; ++i) {
1564 srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
1574 * create a tracking group for the subvol itself
1576 ret = add_qgroup_item(trans, quota_root, objectid);
1580 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
1581 ret = update_qgroup_limit_item(trans, quota_root, objectid,
1583 inherit->lim.max_rfer,
1584 inherit->lim.max_excl,
1585 inherit->lim.rsv_rfer,
1586 inherit->lim.rsv_excl);
1592 struct btrfs_root *srcroot;
1593 struct btrfs_key srckey;
1596 srckey.objectid = srcid;
1597 srckey.type = BTRFS_ROOT_ITEM_KEY;
1598 srckey.offset = (u64)-1;
1599 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
1600 if (IS_ERR(srcroot)) {
1601 ret = PTR_ERR(srcroot);
1606 srcroot_level = btrfs_header_level(srcroot->node);
1607 level_size = btrfs_level_size(srcroot, srcroot_level);
1612 * add qgroup to all inherited groups
1615 i_qgroups = (u64 *)(inherit + 1);
1616 for (i = 0; i < inherit->num_qgroups; ++i) {
1617 ret = add_qgroup_relation_item(trans, quota_root,
1618 objectid, *i_qgroups);
1621 ret = add_qgroup_relation_item(trans, quota_root,
1622 *i_qgroups, objectid);
1630 spin_lock(&fs_info->qgroup_lock);
1632 dstgroup = add_qgroup_rb(fs_info, objectid);
1633 if (IS_ERR(dstgroup)) {
1634 ret = PTR_ERR(dstgroup);
1639 srcgroup = find_qgroup_rb(fs_info, srcid);
1642 dstgroup->rfer = srcgroup->rfer - level_size;
1643 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
1644 srcgroup->excl = level_size;
1645 srcgroup->excl_cmpr = level_size;
1646 qgroup_dirty(fs_info, dstgroup);
1647 qgroup_dirty(fs_info, srcgroup);
1653 i_qgroups = (u64 *)(inherit + 1);
1654 for (i = 0; i < inherit->num_qgroups; ++i) {
1655 ret = add_relation_rb(quota_root->fs_info, objectid,
1662 for (i = 0; i < inherit->num_ref_copies; ++i) {
1663 struct btrfs_qgroup *src;
1664 struct btrfs_qgroup *dst;
1666 src = find_qgroup_rb(fs_info, i_qgroups[0]);
1667 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
1674 dst->rfer = src->rfer - level_size;
1675 dst->rfer_cmpr = src->rfer_cmpr - level_size;
1678 for (i = 0; i < inherit->num_excl_copies; ++i) {
1679 struct btrfs_qgroup *src;
1680 struct btrfs_qgroup *dst;
1682 src = find_qgroup_rb(fs_info, i_qgroups[0]);
1683 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
1690 dst->excl = src->excl + level_size;
1691 dst->excl_cmpr = src->excl_cmpr + level_size;
1696 spin_unlock(&fs_info->qgroup_lock);
1698 mutex_unlock(&fs_info->qgroup_ioctl_lock);
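/*
 * Aside (illustration added here, not part of the original file): the
 * variable-length u64 array that follows struct btrfs_qgroup_inherit in
 * memory, as interpreted by the loops above, appears to be laid out as:
 *
 *	[ num_qgroups ids       ]  qgroups the new subvol qgroup is added to
 *	[ num_ref_copies pairs  ]  (src, dst): dst gets its rfer values
 *	                           seeded from src
 *	[ num_excl_copies pairs ]  (src, dst): dst gets its excl values
 *	                           seeded from src
 *
 * which matches nums = num_qgroups + 2 * num_ref_copies +
 * 2 * num_excl_copies used for validation near the top of the function.
 */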
1703 * reserve some space for a qgroup and all its parents. The reservation takes
1704 * place with start_transaction or delalloc_reserve, similar to ENOSPC
1705 * accounting. If not enough space is available, EDQUOT is returned.
1706 * We assume that the requested space is new for all qgroups.
1708 int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
1710 struct btrfs_root *quota_root;
1711 struct btrfs_qgroup *qgroup;
1712 struct btrfs_fs_info *fs_info = root->fs_info;
1713 u64 ref_root = root->root_key.objectid;
1715 struct ulist_node *unode;
1716 struct ulist_iterator uiter;
1718 if (!is_fstree(ref_root))
1724 spin_lock(&fs_info->qgroup_lock);
1725 quota_root = fs_info->quota_root;
1729 qgroup = find_qgroup_rb(fs_info, ref_root);
1734 * in a first step, we check all affected qgroups if any limits would
1737 ulist_reinit(fs_info->qgroup_ulist);
1738 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
1739 (uintptr_t)qgroup, GFP_ATOMIC);
1742 ULIST_ITER_INIT(&uiter);
1743 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
1744 struct btrfs_qgroup *qg;
1745 struct btrfs_qgroup_list *glist;
1747 qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1749 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
1750 qg->reserved + (s64)qg->rfer + num_bytes >
1756 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
1757 qg->reserved + (s64)qg->excl + num_bytes >
1763 list_for_each_entry(glist, &qg->groups, next_group) {
1764 ret = ulist_add(fs_info->qgroup_ulist,
1765 glist->group->qgroupid,
1766 (uintptr_t)glist->group, GFP_ATOMIC);
1773 * no limits exceeded, now record the reservation into all qgroups
1775 ULIST_ITER_INIT(&uiter);
1776 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
1777 struct btrfs_qgroup *qg;
1779 qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1781 qg->reserved += num_bytes;
1785 spin_unlock(&fs_info->qgroup_lock);
1789 void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
1791 struct btrfs_root *quota_root;
1792 struct btrfs_qgroup *qgroup;
1793 struct btrfs_fs_info *fs_info = root->fs_info;
1794 struct ulist_node *unode;
1795 struct ulist_iterator uiter;
1796 u64 ref_root = root->root_key.objectid;
1799 if (!is_fstree(ref_root))
1805 spin_lock(&fs_info->qgroup_lock);
1807 quota_root = fs_info->quota_root;
1811 qgroup = find_qgroup_rb(fs_info, ref_root);
1815 ulist_reinit(fs_info->qgroup_ulist);
1816 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
1817 (uintptr_t)qgroup, GFP_ATOMIC);
1820 ULIST_ITER_INIT(&uiter);
1821 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
1822 struct btrfs_qgroup *qg;
1823 struct btrfs_qgroup_list *glist;
1825 qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1827 qg->reserved -= num_bytes;
1829 list_for_each_entry(glist, &qg->groups, next_group) {
1830 ret = ulist_add(fs_info->qgroup_ulist,
1831 glist->group->qgroupid,
1832 (uintptr_t)glist->group, GFP_ATOMIC);
1839 spin_unlock(&fs_info->qgroup_lock);
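/*
 * Illustrative sketch (added, not part of the original file): how a caller
 * is expected to pair the two helpers above. The byte count and the helper
 * name are made up; the point is only that a successful
 * btrfs_qgroup_reserve() is paired with a btrfs_qgroup_free() of the same
 * amount once the reservation is no longer needed.
 */
static inline int qgroup_example_reserve_then_release(struct btrfs_root *root)
{
	u64 num_bytes = 4096;	/* hypothetical reservation size */
	int ret;

	ret = btrfs_qgroup_reserve(root, num_bytes);
	if (ret)
		return ret;	/* typically -EDQUOT when a limit is hit */

	/* ... the operation that needed the space would run here ... */

	btrfs_qgroup_free(root, num_bytes);
	return 0;
}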
1842 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
1844 if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
1846 pr_err("btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x\n",
1847 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
1848 (u32)(trans->delayed_ref_elem.seq >> 32),
1849 (u32)trans->delayed_ref_elem.seq);
1854 * returns < 0 on error, 0 when more leaves are to be scanned.
1855 * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
1858 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1859 struct btrfs_trans_handle *trans, struct ulist *tmp,
1860 struct extent_buffer *scratch_leaf)
1862 struct btrfs_key found;
1863 struct ulist *roots = NULL;
1864 struct ulist_node *unode;
1865 struct ulist_iterator uiter;
1866 struct seq_list tree_mod_seq_elem = {};
1871 path->leave_spinning = 1;
1872 mutex_lock(&fs_info->qgroup_rescan_lock);
1873 ret = btrfs_search_slot_for_read(fs_info->extent_root,
1874 &fs_info->qgroup_rescan_progress,
1877 pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
1878 (unsigned long long)fs_info->qgroup_rescan_progress.objectid,
1879 fs_info->qgroup_rescan_progress.type,
1880 (unsigned long long)fs_info->qgroup_rescan_progress.offset,
1885 * The rescan is about to end, we will not be scanning any
1886 * further blocks. We cannot unset the RESCAN flag here, because
1887 * we want to commit the transaction if everything went well.
1888 * To make the live accounting work in this phase, we set our
1889 * scan progress pointer such that every real extent objectid
1892 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
1893 btrfs_release_path(path);
1894 mutex_unlock(&fs_info->qgroup_rescan_lock);
1898 btrfs_item_key_to_cpu(path->nodes[0], &found,
1899 btrfs_header_nritems(path->nodes[0]) - 1);
1900 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
1902 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1903 memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
1904 slot = path->slots[0];
1905 btrfs_release_path(path);
1906 mutex_unlock(&fs_info->qgroup_rescan_lock);
1908 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
1909 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
1910 if (found.type != BTRFS_EXTENT_ITEM_KEY)
1912 ret = btrfs_find_all_roots(trans, fs_info, found.objectid,
1913 tree_mod_seq_elem.seq, &roots);
1916 spin_lock(&fs_info->qgroup_lock);
1917 seq = fs_info->qgroup_seq;
1918 fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
1920 ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
1922 spin_unlock(&fs_info->qgroup_lock);
1928 * step2 of btrfs_qgroup_account_ref works from a single root,
1929 * we're doing it all at once here.
1932 ULIST_ITER_INIT(&uiter);
1933 while ((unode = ulist_next(roots, &uiter))) {
1934 struct btrfs_qgroup *qg;
1936 qg = find_qgroup_rb(fs_info, unode->val);
1940 ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg,
1943 spin_unlock(&fs_info->qgroup_lock);
1949 /* this loop is similar to step 2 of btrfs_qgroup_account_ref */
1950 ULIST_ITER_INIT(&uiter);
1951 while ((unode = ulist_next(tmp, &uiter))) {
1952 struct btrfs_qgroup *qg;
1953 struct btrfs_qgroup_list *glist;
1955 qg = (struct btrfs_qgroup *)(uintptr_t) unode->aux;
1956 qg->rfer += found.offset;
1957 qg->rfer_cmpr += found.offset;
1958 WARN_ON(qg->tag >= seq);
1959 if (qg->refcnt - seq == roots->nnodes) {
1960 qg->excl += found.offset;
1961 qg->excl_cmpr += found.offset;
1963 qgroup_dirty(fs_info, qg);
1965 list_for_each_entry(glist, &qg->groups, next_group) {
1966 ret = ulist_add(tmp, glist->group->qgroupid,
1967 (uintptr_t)glist->group,
1970 spin_unlock(&fs_info->qgroup_lock);
1977 spin_unlock(&fs_info->qgroup_lock);
1983 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1988 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
1990 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
1991 qgroup_rescan_work);
1992 struct btrfs_path *path;
1993 struct btrfs_trans_handle *trans = NULL;
1994 struct ulist *tmp = NULL;
1995 struct extent_buffer *scratch_leaf = NULL;
1998 path = btrfs_alloc_path();
2001 tmp = ulist_alloc(GFP_NOFS);
2004 scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
2010 trans = btrfs_start_transaction(fs_info->fs_root, 0);
2011 if (IS_ERR(trans)) {
2012 err = PTR_ERR(trans);
2015 if (!fs_info->quota_enabled) {
2018 err = qgroup_rescan_leaf(fs_info, path, trans,
2022 btrfs_commit_transaction(trans, fs_info->fs_root);
2024 btrfs_end_transaction(trans, fs_info->fs_root);
2028 kfree(scratch_leaf);
2030 btrfs_free_path(path);
2032 mutex_lock(&fs_info->qgroup_rescan_lock);
2033 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2036 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2037 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2038 } else if (err < 0) {
2039 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2041 mutex_unlock(&fs_info->qgroup_rescan_lock);
2044 pr_info("btrfs: qgroup scan completed%s\n",
2045 err == 2 ? " (inconsistency flag cleared)" : "");
2047 pr_err("btrfs: qgroup scan failed with %d\n", err);
2050 complete_all(&fs_info->qgroup_rescan_completion);
2054 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2055 * memory required for the rescan context.
2058 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2064 (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
2065 !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
2070 mutex_lock(&fs_info->qgroup_rescan_lock);
2071 spin_lock(&fs_info->qgroup_lock);
2074 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2076 else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
2080 spin_unlock(&fs_info->qgroup_lock);
2081 mutex_unlock(&fs_info->qgroup_rescan_lock);
2085 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2088 memset(&fs_info->qgroup_rescan_progress, 0,
2089 sizeof(fs_info->qgroup_rescan_progress));
2090 fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2092 spin_unlock(&fs_info->qgroup_lock);
2093 mutex_unlock(&fs_info->qgroup_rescan_lock);
2095 init_completion(&fs_info->qgroup_rescan_completion);
2097 memset(&fs_info->qgroup_rescan_work, 0,
2098 sizeof(fs_info->qgroup_rescan_work));
2099 fs_info->qgroup_rescan_work.func = btrfs_qgroup_rescan_worker;
2103 pr_info("btrfs: qgroup_rescan_init failed with %d\n", ret);
2111 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2114 struct btrfs_qgroup *qgroup;
2116 spin_lock(&fs_info->qgroup_lock);
2117 /* clear all current qgroup tracking information */
2118 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2119 qgroup = rb_entry(n, struct btrfs_qgroup, node);
2121 qgroup->rfer_cmpr = 0;
2123 qgroup->excl_cmpr = 0;
2125 spin_unlock(&fs_info->qgroup_lock);
2129 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2132 struct btrfs_trans_handle *trans;
2134 ret = qgroup_rescan_init(fs_info, 0, 1);
2139 * We have set the rescan_progress to 0, which means no more
2140 * delayed refs will be accounted by btrfs_qgroup_account_ref.
2141 * However, btrfs_qgroup_account_ref may be right after its call
2142 * to btrfs_find_all_roots, in which case it would still do the
2144 * To solve this, we're committing the transaction, which will
2145 * ensure we run all delayed refs and only after that, we are
2146 * going to clear all tracking information for a clean start.
2149 trans = btrfs_join_transaction(fs_info->fs_root);
2150 if (IS_ERR(trans)) {
2151 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2152 return PTR_ERR(trans);
2154 ret = btrfs_commit_transaction(trans, fs_info->fs_root);
2156 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2160 qgroup_rescan_zero_tracking(fs_info);
2162 btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
2163 &fs_info->qgroup_rescan_work);
2168 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
2173 mutex_lock(&fs_info->qgroup_rescan_lock);
2174 spin_lock(&fs_info->qgroup_lock);
2175 running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2176 spin_unlock(&fs_info->qgroup_lock);
2177 mutex_unlock(&fs_info->qgroup_rescan_lock);
2180 ret = wait_for_completion_interruptible(
2181 &fs_info->qgroup_rescan_completion);
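/*
 * Illustrative sketch (added, not part of the original file): one way a
 * caller could combine the rescan entry points above, starting a full
 * rescan and then blocking until the worker signals completion. Error
 * handling is reduced to the minimum.
 */
static inline int qgroup_example_rescan_and_wait(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = btrfs_qgroup_rescan(fs_info);	/* queues the rescan worker */
	if (ret)
		return ret;

	/* waits on qgroup_rescan_completion, see above */
	return btrfs_qgroup_wait_for_completion(fs_info);
}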
2187 * this is only called from open_ctree where we're still single-threaded, thus
2188 * locking is omitted here.
2191 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2193 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2194 btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
2195 &fs_info->qgroup_rescan_work);