Btrfs: separate sequence numbers for delayed ref tracking and tree mod log
fs/btrfs/qgroup.c  [firefly-linux-kernel-4.4.55.git]
1 /*
2  * Copyright (C) 2011 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/sched.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/rbtree.h>
24 #include <linux/slab.h>
25 #include <linux/workqueue.h>
26 #include <linux/btrfs.h>
27
28 #include "ctree.h"
29 #include "transaction.h"
30 #include "disk-io.h"
31 #include "locking.h"
32 #include "ulist.h"
33 #include "backref.h"
34
35 /* TODO XXX FIXME
36  *  - subvol delete -> delete when ref goes to 0? delete limits also?
37  *  - reorganize keys
38  *  - compressed
39  *  - sync
40  *  - rescan
41  *  - copy also limits on subvol creation
42  *  - limit
43  *  - caches for ulists
44  *  - performance benchmarks
45  *  - check all ioctl parameters
46  */
47
48 /*
49  * one struct for each qgroup, organized in fs_info->qgroup_tree.
50  */
51 struct btrfs_qgroup {
52         u64 qgroupid;
53
54         /*
55          * state
56          */
57         u64 rfer;       /* referenced */
58         u64 rfer_cmpr;  /* referenced compressed */
59         u64 excl;       /* exclusive */
60         u64 excl_cmpr;  /* exclusive compressed */
61
62         /*
63          * limits
64          */
65         u64 lim_flags;  /* which limits are set */
66         u64 max_rfer;
67         u64 max_excl;
68         u64 rsv_rfer;
69         u64 rsv_excl;
70
71         /*
72          * reservation tracking
73          */
74         u64 reserved;
75
76         /*
77          * lists
78          */
79         struct list_head groups;  /* groups this group is member of */
80         struct list_head members; /* groups that are members of this group */
81         struct list_head dirty;   /* dirty groups */
82         struct rb_node node;      /* tree of qgroups */
83
84         /*
85          * scratch state for the seq-based tagging in btrfs_qgroup_account_ref()
86          */
87         u64 tag;
88         u64 refcnt;
89 };
90
91 /*
92  * glue structure to represent the relations between qgroups.
93  */
94 struct btrfs_qgroup_list {
95         struct list_head next_group;    /* linked into member->groups */
96         struct list_head next_member;   /* linked into group->members */
97         struct btrfs_qgroup *group;     /* parent qgroup of the relation */
98         struct btrfs_qgroup *member;    /* child qgroup of the relation */
99 };
100
101 /* must be called with qgroup_ioctl_lock held */
102 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
103                                            u64 qgroupid)
104 {
105         struct rb_node *n = fs_info->qgroup_tree.rb_node;
106         struct btrfs_qgroup *qgroup;
107
108         while (n) {
109                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
110                 if (qgroup->qgroupid < qgroupid)
111                         n = n->rb_left;
112                 else if (qgroup->qgroupid > qgroupid)
113                         n = n->rb_right;
114                 else
115                         return qgroup;
116         }
117         return NULL;
118 }
119
120 /* must be called with qgroup_lock held */
121 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
122                                           u64 qgroupid)
123 {
124         struct rb_node **p = &fs_info->qgroup_tree.rb_node;
125         struct rb_node *parent = NULL;
126         struct btrfs_qgroup *qgroup;
127
128         while (*p) {
129                 parent = *p;
130                 qgroup = rb_entry(parent, struct btrfs_qgroup, node);
131
132                 if (qgroup->qgroupid < qgroupid)
133                         p = &(*p)->rb_left;
134                 else if (qgroup->qgroupid > qgroupid)
135                         p = &(*p)->rb_right;
136                 else
137                         return qgroup;
138         }
139
140         qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
141         if (!qgroup)
142                 return ERR_PTR(-ENOMEM);
143
144         qgroup->qgroupid = qgroupid;
145         INIT_LIST_HEAD(&qgroup->groups);
146         INIT_LIST_HEAD(&qgroup->members);
147         INIT_LIST_HEAD(&qgroup->dirty);
148
149         rb_link_node(&qgroup->node, parent, p);
150         rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
151
152         return qgroup;
153 }
154
155 /* must be called with qgroup_lock held */
156 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
157 {
158         struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
159         struct btrfs_qgroup_list *list;
160
161         if (!qgroup)
162                 return -ENOENT;
163
164         rb_erase(&qgroup->node, &fs_info->qgroup_tree);
165         list_del(&qgroup->dirty);
166
167         while (!list_empty(&qgroup->groups)) {
168                 list = list_first_entry(&qgroup->groups,
169                                         struct btrfs_qgroup_list, next_group);
170                 list_del(&list->next_group);
171                 list_del(&list->next_member);
172                 kfree(list);
173         }
174
175         while (!list_empty(&qgroup->members)) {
176                 list = list_first_entry(&qgroup->members,
177                                         struct btrfs_qgroup_list, next_member);
178                 list_del(&list->next_group);
179                 list_del(&list->next_member);
180                 kfree(list);
181         }
182         kfree(qgroup);
183
184         return 0;
185 }
186
187 /* must be called with qgroup_lock held */
188 static int add_relation_rb(struct btrfs_fs_info *fs_info,
189                            u64 memberid, u64 parentid)
190 {
191         struct btrfs_qgroup *member;
192         struct btrfs_qgroup *parent;
193         struct btrfs_qgroup_list *list;
194
195         member = find_qgroup_rb(fs_info, memberid);
196         parent = find_qgroup_rb(fs_info, parentid);
197         if (!member || !parent)
198                 return -ENOENT;
199
200         list = kzalloc(sizeof(*list), GFP_ATOMIC);
201         if (!list)
202                 return -ENOMEM;
203
204         list->group = parent;
205         list->member = member;
206         list_add_tail(&list->next_group, &member->groups);
207         list_add_tail(&list->next_member, &parent->members);
208
209         return 0;
210 }
211
212 /* must be called with qgroup_lock held */
213 static int del_relation_rb(struct btrfs_fs_info *fs_info,
214                            u64 memberid, u64 parentid)
215 {
216         struct btrfs_qgroup *member;
217         struct btrfs_qgroup *parent;
218         struct btrfs_qgroup_list *list;
219
220         member = find_qgroup_rb(fs_info, memberid);
221         parent = find_qgroup_rb(fs_info, parentid);
222         if (!member || !parent)
223                 return -ENOENT;
224
225         list_for_each_entry(list, &member->groups, next_group) {
226                 if (list->group == parent) {
227                         list_del(&list->next_group);
228                         list_del(&list->next_member);
229                         kfree(list);
230                         return 0;
231                 }
232         }
233         return -ENOENT;
234 }
235
236 /*
237  * The full config is read in one go; this is only called from open_ctree().
238  * It doesn't use any locking, as at this point we're still single-threaded
239  */
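/*
 * On-disk layout of the quota tree, as read below and written by the item
 * helpers further down in this file:
 *   (0,   BTRFS_QGROUP_STATUS_KEY,   0)        one status item per filesystem
 *   (0,   BTRFS_QGROUP_INFO_KEY,     qgroupid) usage counters of one qgroup
 *   (0,   BTRFS_QGROUP_LIMIT_KEY,    qgroupid) limits of one qgroup
 *   (src, BTRFS_QGROUP_RELATION_KEY, dst)      one item per relation and
 *                                              direction (see
 *                                              btrfs_add_qgroup_relation)
 * Pass 1 below picks up the first three, pass 2 the relation items.
 */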
240 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
241 {
242         struct btrfs_key key;
243         struct btrfs_key found_key;
244         struct btrfs_root *quota_root = fs_info->quota_root;
245         struct btrfs_path *path = NULL;
246         struct extent_buffer *l;
247         int slot;
248         int ret = 0;
249         u64 flags = 0;
250
251         if (!fs_info->quota_enabled)
252                 return 0;
253
254         path = btrfs_alloc_path();
255         if (!path) {
256                 ret = -ENOMEM;
257                 goto out;
258         }
259
260         /* default this to quota off, in case no status key is found */
261         fs_info->qgroup_flags = 0;
262
263         /*
264          * pass 1: read status, all qgroup infos and limits
265          */
266         key.objectid = 0;
267         key.type = 0;
268         key.offset = 0;
269         ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
270         if (ret)
271                 goto out;
272
273         while (1) {
274                 struct btrfs_qgroup *qgroup;
275
276                 slot = path->slots[0];
277                 l = path->nodes[0];
278                 btrfs_item_key_to_cpu(l, &found_key, slot);
279
280                 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
281                         struct btrfs_qgroup_status_item *ptr;
282
283                         ptr = btrfs_item_ptr(l, slot,
284                                              struct btrfs_qgroup_status_item);
285
286                         if (btrfs_qgroup_status_version(l, ptr) !=
287                             BTRFS_QGROUP_STATUS_VERSION) {
288                                 printk(KERN_ERR
289                                  "btrfs: old qgroup version, quota disabled\n");
290                                 goto out;
291                         }
292                         if (btrfs_qgroup_status_generation(l, ptr) !=
293                             fs_info->generation) {
294                                 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
295                                 printk(KERN_ERR
296                                         "btrfs: qgroup generation mismatch, "
297                                         "marked as inconsistent\n");
298                         }
299                         fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
300                                                                           ptr);
301                         /* FIXME read scan element */
302                         goto next1;
303                 }
304
305                 if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
306                     found_key.type != BTRFS_QGROUP_LIMIT_KEY)
307                         goto next1;
308
309                 qgroup = find_qgroup_rb(fs_info, found_key.offset);
310                 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
311                     (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
312                         printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
313                         flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
314                 }
315                 if (!qgroup) {
316                         qgroup = add_qgroup_rb(fs_info, found_key.offset);
317                         if (IS_ERR(qgroup)) {
318                                 ret = PTR_ERR(qgroup);
319                                 goto out;
320                         }
321                 }
322                 switch (found_key.type) {
323                 case BTRFS_QGROUP_INFO_KEY: {
324                         struct btrfs_qgroup_info_item *ptr;
325
326                         ptr = btrfs_item_ptr(l, slot,
327                                              struct btrfs_qgroup_info_item);
328                         qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
329                         qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
330                         qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
331                         qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
332                         /* generation currently unused */
333                         break;
334                 }
335                 case BTRFS_QGROUP_LIMIT_KEY: {
336                         struct btrfs_qgroup_limit_item *ptr;
337
338                         ptr = btrfs_item_ptr(l, slot,
339                                              struct btrfs_qgroup_limit_item);
340                         qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
341                         qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
342                         qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
343                         qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
344                         qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
345                         break;
346                 }
347                 }
348 next1:
349                 ret = btrfs_next_item(quota_root, path);
350                 if (ret < 0)
351                         goto out;
352                 if (ret)
353                         break;
354         }
355         btrfs_release_path(path);
356
357         /*
358          * pass 2: read all qgroup relations
359          */
360         key.objectid = 0;
361         key.type = BTRFS_QGROUP_RELATION_KEY;
362         key.offset = 0;
363         ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
364         if (ret)
365                 goto out;
366         while (1) {
367                 slot = path->slots[0];
368                 l = path->nodes[0];
369                 btrfs_item_key_to_cpu(l, &found_key, slot);
370
371                 if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
372                         goto next2;
373
374                 if (found_key.objectid > found_key.offset) {
375                         /* parent <- member, not needed to build config */
376                         /* FIXME should we omit the key completely? */
377                         goto next2;
378                 }
379
380                 ret = add_relation_rb(fs_info, found_key.objectid,
381                                       found_key.offset);
382                 if (ret == -ENOENT) {
383                         printk(KERN_WARNING
384                                 "btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
385                                 (unsigned long long)found_key.objectid,
386                                 (unsigned long long)found_key.offset);
387                         ret = 0;        /* ignore the error */
388                 }
389                 if (ret)
390                         goto out;
391 next2:
392                 ret = btrfs_next_item(quota_root, path);
393                 if (ret < 0)
394                         goto out;
395                 if (ret)
396                         break;
397         }
398 out:
399         fs_info->qgroup_flags |= flags;
400         if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
401                 fs_info->quota_enabled = 0;
402                 fs_info->pending_quota_state = 0;
403         }
404         btrfs_free_path(path);
405
406         return ret < 0 ? ret : 0;
407 }
408
409 /*
410  * This is only called from close_ctree() or open_ctree(), both in single-
411  * threaded paths. Clean up the in-memory structures. No locking needed.
412  */
413 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
414 {
415         struct rb_node *n;
416         struct btrfs_qgroup *qgroup;
417         struct btrfs_qgroup_list *list;
418
419         while ((n = rb_first(&fs_info->qgroup_tree))) {
420                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
421                 rb_erase(n, &fs_info->qgroup_tree);
422
423                 while (!list_empty(&qgroup->groups)) {
424                         list = list_first_entry(&qgroup->groups,
425                                                 struct btrfs_qgroup_list,
426                                                 next_group);
427                         list_del(&list->next_group);
428                         list_del(&list->next_member);
429                         kfree(list);
430                 }
431
432                 while (!list_empty(&qgroup->members)) {
433                         list = list_first_entry(&qgroup->members,
434                                                 struct btrfs_qgroup_list,
435                                                 next_member);
436                         list_del(&list->next_group);
437                         list_del(&list->next_member);
438                         kfree(list);
439                 }
440                 kfree(qgroup);
441         }
442 }
443
444 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
445                                     struct btrfs_root *quota_root,
446                                     u64 src, u64 dst)
447 {
448         int ret;
449         struct btrfs_path *path;
450         struct btrfs_key key;
451
452         path = btrfs_alloc_path();
453         if (!path)
454                 return -ENOMEM;
455
456         key.objectid = src;
457         key.type = BTRFS_QGROUP_RELATION_KEY;
458         key.offset = dst;
459
460         ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
461
462         btrfs_mark_buffer_dirty(path->nodes[0]);
463
464         btrfs_free_path(path);
465         return ret;
466 }
467
468 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
469                                     struct btrfs_root *quota_root,
470                                     u64 src, u64 dst)
471 {
472         int ret;
473         struct btrfs_path *path;
474         struct btrfs_key key;
475
476         path = btrfs_alloc_path();
477         if (!path)
478                 return -ENOMEM;
479
480         key.objectid = src;
481         key.type = BTRFS_QGROUP_RELATION_KEY;
482         key.offset = dst;
483
484         ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
485         if (ret < 0)
486                 goto out;
487
488         if (ret > 0) {
489                 ret = -ENOENT;
490                 goto out;
491         }
492
493         ret = btrfs_del_item(trans, quota_root, path);
494 out:
495         btrfs_free_path(path);
496         return ret;
497 }
498
499 static int add_qgroup_item(struct btrfs_trans_handle *trans,
500                            struct btrfs_root *quota_root, u64 qgroupid)
501 {
502         int ret;
503         struct btrfs_path *path;
504         struct btrfs_qgroup_info_item *qgroup_info;
505         struct btrfs_qgroup_limit_item *qgroup_limit;
506         struct extent_buffer *leaf;
507         struct btrfs_key key;
508
509         path = btrfs_alloc_path();
510         if (!path)
511                 return -ENOMEM;
512
513         key.objectid = 0;
514         key.type = BTRFS_QGROUP_INFO_KEY;
515         key.offset = qgroupid;
516
517         ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
518                                       sizeof(*qgroup_info));
519         if (ret)
520                 goto out;
521
522         leaf = path->nodes[0];
523         qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
524                                  struct btrfs_qgroup_info_item);
525         btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
526         btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
527         btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
528         btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
529         btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
530
531         btrfs_mark_buffer_dirty(leaf);
532
533         btrfs_release_path(path);
534
535         key.type = BTRFS_QGROUP_LIMIT_KEY;
536         ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
537                                       sizeof(*qgroup_limit));
538         if (ret)
539                 goto out;
540
541         leaf = path->nodes[0];
542         qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
543                                   struct btrfs_qgroup_limit_item);
544         btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
545         btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
546         btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
547         btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
548         btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
549
550         btrfs_mark_buffer_dirty(leaf);
551
552         ret = 0;
553 out:
554         btrfs_free_path(path);
555         return ret;
556 }
557
558 static int del_qgroup_item(struct btrfs_trans_handle *trans,
559                            struct btrfs_root *quota_root, u64 qgroupid)
560 {
561         int ret;
562         struct btrfs_path *path;
563         struct btrfs_key key;
564
565         path = btrfs_alloc_path();
566         if (!path)
567                 return -ENOMEM;
568
569         key.objectid = 0;
570         key.type = BTRFS_QGROUP_INFO_KEY;
571         key.offset = qgroupid;
572         ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
573         if (ret < 0)
574                 goto out;
575
576         if (ret > 0) {
577                 ret = -ENOENT;
578                 goto out;
579         }
580
581         ret = btrfs_del_item(trans, quota_root, path);
582         if (ret)
583                 goto out;
584
585         btrfs_release_path(path);
586
587         key.type = BTRFS_QGROUP_LIMIT_KEY;
588         ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
589         if (ret < 0)
590                 goto out;
591
592         if (ret > 0) {
593                 ret = -ENOENT;
594                 goto out;
595         }
596
597         ret = btrfs_del_item(trans, quota_root, path);
598
599 out:
600         btrfs_free_path(path);
601         return ret;
602 }
603
604 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
605                                     struct btrfs_root *root, u64 qgroupid,
606                                     u64 flags, u64 max_rfer, u64 max_excl,
607                                     u64 rsv_rfer, u64 rsv_excl)
608 {
609         struct btrfs_path *path;
610         struct btrfs_key key;
611         struct extent_buffer *l;
612         struct btrfs_qgroup_limit_item *qgroup_limit;
613         int ret;
614         int slot;
615
616         key.objectid = 0;
617         key.type = BTRFS_QGROUP_LIMIT_KEY;
618         key.offset = qgroupid;
619
620         path = btrfs_alloc_path();
621         if (!path)
622                 return -ENOMEM;
623
624         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
625         if (ret > 0)
626                 ret = -ENOENT;
627
628         if (ret)
629                 goto out;
630
631         l = path->nodes[0];
632         slot = path->slots[0];
633         qgroup_limit = btrfs_item_ptr(l, path->slots[0],
634                                       struct btrfs_qgroup_limit_item);
635         btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
636         btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
637         btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
638         btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
639         btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);
640
641         btrfs_mark_buffer_dirty(l);
642
643 out:
644         btrfs_free_path(path);
645         return ret;
646 }
647
648 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
649                                    struct btrfs_root *root,
650                                    struct btrfs_qgroup *qgroup)
651 {
652         struct btrfs_path *path;
653         struct btrfs_key key;
654         struct extent_buffer *l;
655         struct btrfs_qgroup_info_item *qgroup_info;
656         int ret;
657         int slot;
658
659         key.objectid = 0;
660         key.type = BTRFS_QGROUP_INFO_KEY;
661         key.offset = qgroup->qgroupid;
662
663         path = btrfs_alloc_path();
664         if (!path)
665                 return -ENOMEM;
666
667         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
668         if (ret > 0)
669                 ret = -ENOENT;
670
671         if (ret)
672                 goto out;
673
674         l = path->nodes[0];
675         slot = path->slots[0];
676         qgroup_info = btrfs_item_ptr(l, path->slots[0],
677                                  struct btrfs_qgroup_info_item);
678         btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
679         btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
680         btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
681         btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
682         btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
683
684         btrfs_mark_buffer_dirty(l);
685
686 out:
687         btrfs_free_path(path);
688         return ret;
689 }
690
691 static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
692                                      struct btrfs_fs_info *fs_info,
693                                     struct btrfs_root *root)
694 {
695         struct btrfs_path *path;
696         struct btrfs_key key;
697         struct extent_buffer *l;
698         struct btrfs_qgroup_status_item *ptr;
699         int ret;
700         int slot;
701
702         key.objectid = 0;
703         key.type = BTRFS_QGROUP_STATUS_KEY;
704         key.offset = 0;
705
706         path = btrfs_alloc_path();
707         if (!path)
708                 return -ENOMEM;
709
710         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
711         if (ret > 0)
712                 ret = -ENOENT;
713
714         if (ret)
715                 goto out;
716
717         l = path->nodes[0];
718         slot = path->slots[0];
719         ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
720         btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
721         btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
722         /* XXX scan */
723
724         btrfs_mark_buffer_dirty(l);
725
726 out:
727         btrfs_free_path(path);
728         return ret;
729 }
730
731 /*
732  * called with qgroup_ioctl_lock held
733  */
734 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
735                                   struct btrfs_root *root)
736 {
737         struct btrfs_path *path;
738         struct btrfs_key key;
739         struct extent_buffer *leaf = NULL;
740         int ret;
741         int nr = 0;
742
743         path = btrfs_alloc_path();
744         if (!path)
745                 return -ENOMEM;
746
747         path->leave_spinning = 1;
748
749         key.objectid = 0;
750         key.offset = 0;
751         key.type = 0;
752
753         while (1) {
754                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
755                 if (ret < 0)
756                         goto out;
757                 leaf = path->nodes[0];
758                 nr = btrfs_header_nritems(leaf);
759                 if (!nr)
760                         break;
761                 /*
762                  * delete the leaves one by one
763                  * since the whole tree is going
764                  * to be deleted.
765                  */
766                 path->slots[0] = 0;
767                 ret = btrfs_del_items(trans, root, path, 0, nr);
768                 if (ret)
769                         goto out;
770
771                 btrfs_release_path(path);
772         }
773         ret = 0;
774 out:
775         root->fs_info->pending_quota_state = 0;
776         btrfs_free_path(path);
777         return ret;
778 }
779
780 int btrfs_quota_enable(struct btrfs_trans_handle *trans,
781                        struct btrfs_fs_info *fs_info)
782 {
783         struct btrfs_root *quota_root;
784         struct btrfs_root *tree_root = fs_info->tree_root;
785         struct btrfs_path *path = NULL;
786         struct btrfs_qgroup_status_item *ptr;
787         struct extent_buffer *leaf;
788         struct btrfs_key key;
789         struct btrfs_key found_key;
790         struct btrfs_qgroup *qgroup = NULL;
791         int ret = 0;
792         int slot;
793
794         mutex_lock(&fs_info->qgroup_ioctl_lock);
795         if (fs_info->quota_root) {
796                 fs_info->pending_quota_state = 1;
797                 goto out;
798         }
799
800         /*
801          * initially create the quota tree
802          */
803         quota_root = btrfs_create_tree(trans, fs_info,
804                                        BTRFS_QUOTA_TREE_OBJECTID);
805         if (IS_ERR(quota_root)) {
806                 ret =  PTR_ERR(quota_root);
807                 goto out;
808         }
809
810         path = btrfs_alloc_path();
811         if (!path) {
812                 ret = -ENOMEM;
813                 goto out_free_root;
814         }
815
816         key.objectid = 0;
817         key.type = BTRFS_QGROUP_STATUS_KEY;
818         key.offset = 0;
819
820         ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
821                                       sizeof(*ptr));
822         if (ret)
823                 goto out_free_path;
824
825         leaf = path->nodes[0];
826         ptr = btrfs_item_ptr(leaf, path->slots[0],
827                                  struct btrfs_qgroup_status_item);
828         btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
829         btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
830         fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
831                                 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
832         btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
833         btrfs_set_qgroup_status_scan(leaf, ptr, 0);
834
835         btrfs_mark_buffer_dirty(leaf);
836
837         key.objectid = 0;
838         key.type = BTRFS_ROOT_REF_KEY;
839         key.offset = 0;
840
841         btrfs_release_path(path);
842         ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
843         if (ret > 0)
844                 goto out_add_root;
845         if (ret < 0)
846                 goto out_free_path;
847
848
849         while (1) {
850                 slot = path->slots[0];
851                 leaf = path->nodes[0];
852                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
853
854                 if (found_key.type == BTRFS_ROOT_REF_KEY) {
855                         ret = add_qgroup_item(trans, quota_root,
856                                               found_key.offset);
857                         if (ret)
858                                 goto out_free_path;
859
860                         qgroup = add_qgroup_rb(fs_info, found_key.offset);
861                         if (IS_ERR(qgroup)) {
862                                 ret = PTR_ERR(qgroup);
863                                 goto out_free_path;
864                         }
865                 }
866                 ret = btrfs_next_item(tree_root, path);
867                 if (ret < 0)
868                         goto out_free_path;
869                 if (ret)
870                         break;
871         }
872
873 out_add_root:
874         btrfs_release_path(path);
875         ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
876         if (ret)
877                 goto out_free_path;
878
879         qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
880         if (IS_ERR(qgroup)) {
881                 ret = PTR_ERR(qgroup);
882                 goto out_free_path;
883         }
884         spin_lock(&fs_info->qgroup_lock);
885         fs_info->quota_root = quota_root;
886         fs_info->pending_quota_state = 1;
887         spin_unlock(&fs_info->qgroup_lock);
888 out_free_path:
889         btrfs_free_path(path);
890 out_free_root:
891         if (ret) {
892                 free_extent_buffer(quota_root->node);
893                 free_extent_buffer(quota_root->commit_root);
894                 kfree(quota_root);
895         }
896 out:
897         mutex_unlock(&fs_info->qgroup_ioctl_lock);
898         return ret;
899 }
900
901 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
902                         struct btrfs_fs_info *fs_info)
903 {
904         struct btrfs_root *tree_root = fs_info->tree_root;
905         struct btrfs_root *quota_root;
906         int ret = 0;
907
908         mutex_lock(&fs_info->qgroup_ioctl_lock);
909         if (!fs_info->quota_root)
910                 goto out;
911         spin_lock(&fs_info->qgroup_lock);
912         fs_info->quota_enabled = 0;
913         fs_info->pending_quota_state = 0;
914         quota_root = fs_info->quota_root;
915         fs_info->quota_root = NULL;
916         btrfs_free_qgroup_config(fs_info);
917         spin_unlock(&fs_info->qgroup_lock);
918
919         if (!quota_root) {
920                 ret = -EINVAL;
921                 goto out;
922         }
923
924         ret = btrfs_clean_quota_tree(trans, quota_root);
925         if (ret)
926                 goto out;
927
928         ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
929         if (ret)
930                 goto out;
931
932         list_del(&quota_root->dirty_list);
933
934         btrfs_tree_lock(quota_root->node);
935         clean_tree_block(trans, tree_root, quota_root->node);
936         btrfs_tree_unlock(quota_root->node);
937         btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
938
939         free_extent_buffer(quota_root->node);
940         free_extent_buffer(quota_root->commit_root);
941         kfree(quota_root);
942 out:
943         mutex_unlock(&fs_info->qgroup_ioctl_lock);
944         return ret;
945 }
946
947 int btrfs_quota_rescan(struct btrfs_fs_info *fs_info)
948 {
949         /* FIXME */
950         return 0;
951 }
952
953 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
954                               struct btrfs_fs_info *fs_info, u64 src, u64 dst)
955 {
956         struct btrfs_root *quota_root;
957         struct btrfs_qgroup *parent;
958         struct btrfs_qgroup *member;
959         struct btrfs_qgroup_list *list;
960         int ret = 0;
961
962         mutex_lock(&fs_info->qgroup_ioctl_lock);
963         quota_root = fs_info->quota_root;
964         if (!quota_root) {
965                 ret = -EINVAL;
966                 goto out;
967         }
968         member = find_qgroup_rb(fs_info, src);
969         parent = find_qgroup_rb(fs_info, dst);
970         if (!member || !parent) {
971                 ret = -EINVAL;
972                 goto out;
973         }
974
975         /* check if such a qgroup relation already exists */
976         list_for_each_entry(list, &member->groups, next_group) {
977                 if (list->group == parent) {
978                         ret = -EEXIST;
979                         goto out;
980                 }
981         }
982
983         ret = add_qgroup_relation_item(trans, quota_root, src, dst);
984         if (ret)
985                 goto out;
986
987         ret = add_qgroup_relation_item(trans, quota_root, dst, src);
988         if (ret) {
989                 del_qgroup_relation_item(trans, quota_root, src, dst);
990                 goto out;
991         }
992
993         spin_lock(&fs_info->qgroup_lock);
994         ret = add_relation_rb(quota_root->fs_info, src, dst);
995         spin_unlock(&fs_info->qgroup_lock);
996 out:
997         mutex_unlock(&fs_info->qgroup_ioctl_lock);
998         return ret;
999 }
1000
1001 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
1002                               struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1003 {
1004         struct btrfs_root *quota_root;
1005         struct btrfs_qgroup *parent;
1006         struct btrfs_qgroup *member;
1007         struct btrfs_qgroup_list *list;
1008         int ret = 0;
1009         int err;
1010
1011         mutex_lock(&fs_info->qgroup_ioctl_lock);
1012         quota_root = fs_info->quota_root;
1013         if (!quota_root) {
1014                 ret = -EINVAL;
1015                 goto out;
1016         }
1017
1018         member = find_qgroup_rb(fs_info, src);
1019         parent = find_qgroup_rb(fs_info, dst);
1020         if (!member || !parent) {
1021                 ret = -EINVAL;
1022                 goto out;
1023         }
1024
1025         /* check if such a qgroup relation already exists */
1026         list_for_each_entry(list, &member->groups, next_group) {
1027                 if (list->group == parent)
1028                         goto exist;
1029         }
1030         ret = -ENOENT;
1031         goto out;
1032 exist:
1033         ret = del_qgroup_relation_item(trans, quota_root, src, dst);
1034         err = del_qgroup_relation_item(trans, quota_root, dst, src);
1035         if (err && !ret)
1036                 ret = err;
1037
1038         spin_lock(&fs_info->qgroup_lock);
1039         del_relation_rb(fs_info, src, dst);
1040         spin_unlock(&fs_info->qgroup_lock);
1041 out:
1042         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1043         return ret;
1044 }
1045
1046 int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
1047                         struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
1048 {
1049         struct btrfs_root *quota_root;
1050         struct btrfs_qgroup *qgroup;
1051         int ret = 0;
1052
1053         mutex_lock(&fs_info->qgroup_ioctl_lock);
1054         quota_root = fs_info->quota_root;
1055         if (!quota_root) {
1056                 ret = -EINVAL;
1057                 goto out;
1058         }
1059         qgroup = find_qgroup_rb(fs_info, qgroupid);
1060         if (qgroup) {
1061                 ret = -EEXIST;
1062                 goto out;
1063         }
1064
1065         ret = add_qgroup_item(trans, quota_root, qgroupid);
1066         if (ret)
1067                 goto out;
1068
1069         spin_lock(&fs_info->qgroup_lock);
1070         qgroup = add_qgroup_rb(fs_info, qgroupid);
1071         spin_unlock(&fs_info->qgroup_lock);
1072
1073         if (IS_ERR(qgroup))
1074                 ret = PTR_ERR(qgroup);
1075 out:
1076         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1077         return ret;
1078 }
1079
1080 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
1081                         struct btrfs_fs_info *fs_info, u64 qgroupid)
1082 {
1083         struct btrfs_root *quota_root;
1084         struct btrfs_qgroup *qgroup;
1085         int ret = 0;
1086
1087         mutex_lock(&fs_info->qgroup_ioctl_lock);
1088         quota_root = fs_info->quota_root;
1089         if (!quota_root) {
1090                 ret = -EINVAL;
1091                 goto out;
1092         }
1093
1094         qgroup = find_qgroup_rb(fs_info, qgroupid);
1095         if (!qgroup) {
1096                 ret = -ENOENT;
1097                 goto out;
1098         } else {
1099                 /* check if there are no relations to this qgroup */
1100                 if (!list_empty(&qgroup->groups) ||
1101                     !list_empty(&qgroup->members)) {
1102                         ret = -EBUSY;
1103                         goto out;
1104                 }
1105         }
1106         ret = del_qgroup_item(trans, quota_root, qgroupid);
1107
1108         spin_lock(&fs_info->qgroup_lock);
1109         del_qgroup_rb(quota_root->fs_info, qgroupid);
1110         spin_unlock(&fs_info->qgroup_lock);
1111 out:
1112         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1113         return ret;
1114 }
1115
1116 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1117                        struct btrfs_fs_info *fs_info, u64 qgroupid,
1118                        struct btrfs_qgroup_limit *limit)
1119 {
1120         struct btrfs_root *quota_root;
1121         struct btrfs_qgroup *qgroup;
1122         int ret = 0;
1123
1124         mutex_lock(&fs_info->qgroup_ioctl_lock);
1125         quota_root = fs_info->quota_root;
1126         if (!quota_root) {
1127                 ret = -EINVAL;
1128                 goto out;
1129         }
1130
1131         qgroup = find_qgroup_rb(fs_info, qgroupid);
1132         if (!qgroup) {
1133                 ret = -ENOENT;
1134                 goto out;
1135         }
1136         ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
1137                                        limit->flags, limit->max_rfer,
1138                                        limit->max_excl, limit->rsv_rfer,
1139                                        limit->rsv_excl);
1140         if (ret) {
1141                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1142                 printk(KERN_INFO "unable to update quota limit for %llu\n",
1143                        (unsigned long long)qgroupid);
1144         }
1145
1146         spin_lock(&fs_info->qgroup_lock);
1147         qgroup->lim_flags = limit->flags;
1148         qgroup->max_rfer = limit->max_rfer;
1149         qgroup->max_excl = limit->max_excl;
1150         qgroup->rsv_rfer = limit->rsv_rfer;
1151         qgroup->rsv_excl = limit->rsv_excl;
1152         spin_unlock(&fs_info->qgroup_lock);
1153 out:
1154         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1155         return ret;
1156 }
1157
1158 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1159                          struct btrfs_qgroup *qgroup)
1160 {
1161         if (list_empty(&qgroup->dirty))
1162                 list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1163 }
1164
1165 /*
1166  * btrfs_qgroup_record_ref is called when a ref is added or deleted. It puts
1167  * the modification into a list that's later used by btrfs_end_transaction to
1168  * pass the recorded modifications on to btrfs_qgroup_account_ref.
1169  */
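/*
 * The BUG_ON below checks that the transaction already holds a tree mod log
 * sequence element (trans->delayed_ref_elem.seq != 0); the sequence number
 * stored in the delayed ref node is what btrfs_qgroup_account_ref() later
 * passes to btrfs_find_all_roots() to reconstruct the old state of the extent.
 */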
1170 int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
1171                             struct btrfs_delayed_ref_node *node,
1172                             struct btrfs_delayed_extent_op *extent_op)
1173 {
1174         struct qgroup_update *u;
1175
1176         BUG_ON(!trans->delayed_ref_elem.seq);
1177         u = kmalloc(sizeof(*u), GFP_NOFS);
1178         if (!u)
1179                 return -ENOMEM;
1180
1181         u->node = node;
1182         u->extent_op = extent_op;
1183         list_add_tail(&u->list, &trans->qgroup_ref_list);
1184
1185         return 0;
1186 }
1187
1188 /*
1189  * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
1190  * from the fs. First, all roots referencing the extent are searched, and
1191  * then the space is accounted to the different roots accordingly. The
1192  * accounting algorithm works in 3 steps documented inline.
1193  */
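/*
 * A small worked example of the three steps, using a made-up hierarchy
 * where 0/A and 0/B are members of 1/P, 0/C is a member of 1/Q, an extent
 * of N bytes is currently referenced by roots A and B, and a ref for root
 * C is being added (sgn = 1, roots = {A, B}):
 *
 * step 1: 0/A and 0/B each end up with refcnt = seq + 1, their common
 *         parent 1/P with refcnt = seq + 2 (visited from both old roots).
 * step 2: walking up from 0/C, neither 0/C nor 1/Q was visited in step 1,
 *         so both get rfer += N (excl is untouched since roots is not
 *         empty); both are tagged with seq.
 * step 3: 1/P was reached by all (two) old roots and was not tagged in
 *         step 2, so the extent stops being exclusive to it: excl -= N.
 *         0/A and 0/B were each reached by only one old root, so their
 *         excl is left alone.
 */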
1194 int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
1195                              struct btrfs_fs_info *fs_info,
1196                              struct btrfs_delayed_ref_node *node,
1197                              struct btrfs_delayed_extent_op *extent_op)
1198 {
1199         struct btrfs_key ins;
1200         struct btrfs_root *quota_root;
1201         u64 ref_root;
1202         struct btrfs_qgroup *qgroup;
1203         struct ulist_node *unode;
1204         struct ulist *roots = NULL;
1205         struct ulist *tmp = NULL;
1206         struct ulist_iterator uiter;
1207         u64 seq;
1208         int ret = 0;
1209         int sgn;
1210
1211         if (!fs_info->quota_enabled)
1212                 return 0;
1213
1214         BUG_ON(!fs_info->quota_root);
1215
1216         ins.objectid = node->bytenr;
1217         ins.offset = node->num_bytes;
1218         ins.type = BTRFS_EXTENT_ITEM_KEY;
1219
1220         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1221             node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
1222                 struct btrfs_delayed_tree_ref *ref;
1223                 ref = btrfs_delayed_node_to_tree_ref(node);
1224                 ref_root = ref->root;
1225         } else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1226                    node->type == BTRFS_SHARED_DATA_REF_KEY) {
1227                 struct btrfs_delayed_data_ref *ref;
1228                 ref = btrfs_delayed_node_to_data_ref(node);
1229                 ref_root = ref->root;
1230         } else {
1231                 BUG();
1232         }
1233
1234         if (!is_fstree(ref_root)) {
1235                 /*
1236                  * non-fs-trees are not being accounted
1237                  */
1238                 return 0;
1239         }
1240
1241         switch (node->action) {
1242         case BTRFS_ADD_DELAYED_REF:
1243         case BTRFS_ADD_DELAYED_EXTENT:
1244                 sgn = 1;
1245                 seq = btrfs_tree_mod_seq_prev(node->seq);
1246                 break;
1247         case BTRFS_DROP_DELAYED_REF:
1248                 sgn = -1;
1249                 seq = node->seq;
1250                 break;
1251         case BTRFS_UPDATE_DELAYED_HEAD:
1252                 return 0;
1253         default:
1254                 BUG();
1255         }
1256
1257         /*
1258          * the delayed ref sequence number we pass depends on the direction of
1259          * the operation. for add operations, we pass
1260          * btrfs_tree_mod_seq_prev(node->seq) to skip
1261          * the delayed ref's current sequence number, because we need the state
1262          * of the tree before the add operation. for delete operations, we pass
1263          * (node->seq) to include the delayed ref's current sequence number,
1264          * because we need the state of the tree after the delete operation.
1265          */
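	/*
	 * Either way, "roots" ends up holding the roots that reference the
	 * extent without counting the reference being added or removed here,
	 * which is exactly the set that steps 1 and 3 below compare against.
	 */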
1266         ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, seq, &roots);
1267         if (ret < 0)
1268                 return ret;
1269
1270         spin_lock(&fs_info->qgroup_lock);
1271         quota_root = fs_info->quota_root;
1272         if (!quota_root)
1273                 goto unlock;
1274
1275         qgroup = find_qgroup_rb(fs_info, ref_root);
1276         if (!qgroup)
1277                 goto unlock;
1278
1279         /*
1280          * step 1: for each old root, visit its qgroup and all parents once, inc refcnt
1281          */
1282         tmp = ulist_alloc(GFP_ATOMIC);
1283         if (!tmp) {
1284                 ret = -ENOMEM;
1285                 goto unlock;
1286         }
1287         seq = fs_info->qgroup_seq;
1288         fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
1289
1290         ULIST_ITER_INIT(&uiter);
1291         while ((unode = ulist_next(roots, &uiter))) {
1292                 struct ulist_node *tmp_unode;
1293                 struct ulist_iterator tmp_uiter;
1294                 struct btrfs_qgroup *qg;
1295
1296                 qg = find_qgroup_rb(fs_info, unode->val);
1297                 if (!qg)
1298                         continue;
1299
1300                 ulist_reinit(tmp);
1301                                                 /* XXX id not needed */
1302                 ret = ulist_add(tmp, qg->qgroupid,
1303                                 (u64)(uintptr_t)qg, GFP_ATOMIC);
1304                 if (ret < 0)
1305                         goto unlock;
1306                 ULIST_ITER_INIT(&tmp_uiter);
1307                 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1308                         struct btrfs_qgroup_list *glist;
1309
1310                         qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
1311                         if (qg->refcnt < seq)
1312                                 qg->refcnt = seq + 1;
1313                         else
1314                                 ++qg->refcnt;
1315
1316                         list_for_each_entry(glist, &qg->groups, next_group) {
1317                                 ret = ulist_add(tmp, glist->group->qgroupid,
1318                                                 (u64)(uintptr_t)glist->group,
1319                                                 GFP_ATOMIC);
1320                                 if (ret < 0)
1321                                         goto unlock;
1322                         }
1323                 }
1324         }
1325
1326         /*
1327          * step 2: walk up from ref_root (the root whose ref is added or removed)
1328          */
1329         ulist_reinit(tmp);
1330         ret = ulist_add(tmp, qgroup->qgroupid,
1331                         (uintptr_t)qgroup, GFP_ATOMIC);
1332         if (ret < 0)
1333                 goto unlock;
1334         ULIST_ITER_INIT(&uiter);
1335         while ((unode = ulist_next(tmp, &uiter))) {
1336                 struct btrfs_qgroup *qg;
1337                 struct btrfs_qgroup_list *glist;
1338
1339                 qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1340                 if (qg->refcnt < seq) {
1341                         /* not visited by step 1 */
1342                         qg->rfer += sgn * node->num_bytes;
1343                         qg->rfer_cmpr += sgn * node->num_bytes;
1344                         if (roots->nnodes == 0) {
1345                                 qg->excl += sgn * node->num_bytes;
1346                                 qg->excl_cmpr += sgn * node->num_bytes;
1347                         }
1348                         qgroup_dirty(fs_info, qg);
1349                 }
1350                 WARN_ON(qg->tag >= seq);
1351                 qg->tag = seq;
1352
1353                 list_for_each_entry(glist, &qg->groups, next_group) {
1354                         ret = ulist_add(tmp, glist->group->qgroupid,
1355                                         (uintptr_t)glist->group, GFP_ATOMIC);
1356                         if (ret < 0)
1357                                 goto unlock;
1358                 }
1359         }
1360
1361         /*
1362          * step 3: walk again from the old roots
1363          */
1364         ULIST_ITER_INIT(&uiter);
1365         while ((unode = ulist_next(roots, &uiter))) {
1366                 struct btrfs_qgroup *qg;
1367                 struct ulist_node *tmp_unode;
1368                 struct ulist_iterator tmp_uiter;
1369
1370                 qg = find_qgroup_rb(fs_info, unode->val);
1371                 if (!qg)
1372                         continue;
1373
1374                 ulist_reinit(tmp);
1375                 ret = ulist_add(tmp, qg->qgroupid,
1376                                 (uintptr_t)qg, GFP_ATOMIC);
1377                 if (ret < 0)
1378                         goto unlock;
1379                 ULIST_ITER_INIT(&tmp_uiter);
1380                 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1381                         struct btrfs_qgroup_list *glist;
1382
1383                         qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
1384                         if (qg->tag == seq)
1385                                 continue;
1386
1387                         if (qg->refcnt - seq == roots->nnodes) {
1388                                 qg->excl -= sgn * node->num_bytes;
1389                                 qg->excl_cmpr -= sgn * node->num_bytes;
1390                                 qgroup_dirty(fs_info, qg);
1391                         }
1392
1393                         list_for_each_entry(glist, &qg->groups, next_group) {
1394                                 ret = ulist_add(tmp, glist->group->qgroupid,
1395                                                 (uintptr_t)glist->group,
1396                                                 GFP_ATOMIC);
1397                                 if (ret < 0)
1398                                         goto unlock;
1399                         }
1400                 }
1401         }
1402         ret = 0;
1403 unlock:
1404         spin_unlock(&fs_info->qgroup_lock);
1405         ulist_free(roots);
1406         ulist_free(tmp);
1407
1408         return ret;
1409 }
1410
1411 /*
1412  * called from commit_transaction. Writes all changed qgroups to disk.
1413  */
1414 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
1415                       struct btrfs_fs_info *fs_info)
1416 {
1417         struct btrfs_root *quota_root = fs_info->quota_root;
1418         int ret = 0;
1419
1420         if (!quota_root)
1421                 goto out;
1422
1423         fs_info->quota_enabled = fs_info->pending_quota_state;
1424
1425         spin_lock(&fs_info->qgroup_lock);
1426         while (!list_empty(&fs_info->dirty_qgroups)) {
1427                 struct btrfs_qgroup *qgroup;
1428                 qgroup = list_first_entry(&fs_info->dirty_qgroups,
1429                                           struct btrfs_qgroup, dirty);
1430                 list_del_init(&qgroup->dirty);
1431                 spin_unlock(&fs_info->qgroup_lock);
1432                 ret = update_qgroup_info_item(trans, quota_root, qgroup);
1433                 if (ret)
1434                         fs_info->qgroup_flags |=
1435                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1436                 spin_lock(&fs_info->qgroup_lock);
1437         }
1438         if (fs_info->quota_enabled)
1439                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
1440         else
1441                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1442         spin_unlock(&fs_info->qgroup_lock);
1443
1444         ret = update_qgroup_status_item(trans, fs_info, quota_root);
1445         if (ret)
1446                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1447
1448 out:
1449
1450         return ret;
1451 }
1452
1453 /*
1454  * copy the accounting information between qgroups. This is necessary when a
1455  * snapshot or a subvolume is created
1456  */
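/*
 * Note on the level_size arithmetic below: a snapshot gets its own copy of
 * the source's root node but shares all tree blocks underneath it, and
 * level_size is the byte size of one tree block at the source root's level.
 * The adjustments thus presumably account for that single unshared block:
 * the snapshot starts with the source's rfer minus the source root node,
 * and the source root node becomes the only space exclusive to the source.
 */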
1457 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1458                          struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
1459                          struct btrfs_qgroup_inherit *inherit)
1460 {
1461         int ret = 0;
1462         int i;
1463         u64 *i_qgroups;
1464         struct btrfs_root *quota_root = fs_info->quota_root;
1465         struct btrfs_qgroup *srcgroup;
1466         struct btrfs_qgroup *dstgroup;
1467         u32 level_size = 0;
1468         u64 nums;
1469
1470         mutex_lock(&fs_info->qgroup_ioctl_lock);
1471         if (!fs_info->quota_enabled)
1472                 goto out;
1473
1474         if (!quota_root) {
1475                 ret = -EINVAL;
1476                 goto out;
1477         }
1478
1479         if (inherit) {
1480                 i_qgroups = (u64 *)(inherit + 1);
1481                 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
1482                        2 * inherit->num_excl_copies;
1483                 for (i = 0; i < nums; ++i) {
1484                         srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
1485                         if (!srcgroup) {
1486                                 ret = -EINVAL;
1487                                 goto out;
1488                         }
1489                         ++i_qgroups;
1490                 }
1491         }
1492
1493         /*
1494          * create a tracking group for the subvol itself
1495          */
1496         ret = add_qgroup_item(trans, quota_root, objectid);
1497         if (ret)
1498                 goto out;
1499
1500         if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
1501                 ret = update_qgroup_limit_item(trans, quota_root, objectid,
1502                                                inherit->lim.flags,
1503                                                inherit->lim.max_rfer,
1504                                                inherit->lim.max_excl,
1505                                                inherit->lim.rsv_rfer,
1506                                                inherit->lim.rsv_excl);
1507                 if (ret)
1508                         goto out;
1509         }
1510
1511         if (srcid) {
1512                 struct btrfs_root *srcroot;
1513                 struct btrfs_key srckey;
1514                 int srcroot_level;
1515
1516                 srckey.objectid = srcid;
1517                 srckey.type = BTRFS_ROOT_ITEM_KEY;
1518                 srckey.offset = (u64)-1;
1519                 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
1520                 if (IS_ERR(srcroot)) {
1521                         ret = PTR_ERR(srcroot);
1522                         goto out;
1523                 }
1524
1525                 rcu_read_lock();
1526                 srcroot_level = btrfs_header_level(srcroot->node);
1527                 level_size = btrfs_level_size(srcroot, srcroot_level);
1528                 rcu_read_unlock();
1529         }
1530
1531         /*
1532          * add qgroup to all inherited groups
1533          */
1534         if (inherit) {
1535                 i_qgroups = (u64 *)(inherit + 1);
1536                 for (i = 0; i < inherit->num_qgroups; ++i) {
1537                         ret = add_qgroup_relation_item(trans, quota_root,
1538                                                        objectid, *i_qgroups);
1539                         if (ret)
1540                                 goto out;
1541                         ret = add_qgroup_relation_item(trans, quota_root,
1542                                                        *i_qgroups, objectid);
1543                         if (ret)
1544                                 goto out;
1545                         ++i_qgroups;
1546                 }
1547         }
1548
1549
1550         spin_lock(&fs_info->qgroup_lock);
1551
1552         dstgroup = add_qgroup_rb(fs_info, objectid);
1553         if (IS_ERR(dstgroup)) {
1554                 ret = PTR_ERR(dstgroup);
1555                 goto unlock;
1556         }
1557
1558         if (srcid) {
1559                 srcgroup = find_qgroup_rb(fs_info, srcid);
1560                 if (!srcgroup)
1561                         goto unlock;
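                /*
                 * Illustrative note: level_size was computed above as the
                 * byte size of one tree block at the source root's level.
                 * The adjustments below appear to account for the source
                 * root node, the only block not shared with the new
                 * snapshot: it stays exclusive to the source, and the
                 * snapshot's referenced counts are reduced by that one
                 * block.
                 */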
1562                 dstgroup->rfer = srcgroup->rfer - level_size;
1563                 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
1564                 srcgroup->excl = level_size;
1565                 srcgroup->excl_cmpr = level_size;
1566                 qgroup_dirty(fs_info, dstgroup);
1567                 qgroup_dirty(fs_info, srcgroup);
1568         }
1569
1570         if (!inherit)
1571                 goto unlock;
1572
1573         i_qgroups = (u64 *)(inherit + 1);
1574         for (i = 0; i < inherit->num_qgroups; ++i) {
1575                 ret = add_relation_rb(quota_root->fs_info, objectid,
1576                                       *i_qgroups);
1577                 if (ret)
1578                         goto unlock;
1579                 ++i_qgroups;
1580         }
1581
1582         for (i = 0; i < inherit->num_ref_copies; ++i) {
1583                 struct btrfs_qgroup *src;
1584                 struct btrfs_qgroup *dst;
1585
1586                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
1587                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
1588
1589                 if (!src || !dst) {
1590                         ret = -EINVAL;
1591                         goto unlock;
1592                 }
1593
1594                 dst->rfer = src->rfer - level_size;
1595                 dst->rfer_cmpr = src->rfer_cmpr - level_size;
1596                 i_qgroups += 2;
1597         }
1598         for (i = 0; i < inherit->num_excl_copies; ++i) {
1599                 struct btrfs_qgroup *src;
1600                 struct btrfs_qgroup *dst;
1601
1602                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
1603                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
1604
1605                 if (!src || !dst) {
1606                         ret = -EINVAL;
1607                         goto unlock;
1608                 }
1609
1610                 dst->excl = src->excl + level_size;
1611                 dst->excl_cmpr = src->excl_cmpr + level_size;
1612                 i_qgroups += 2;
1613         }
1614
1615 unlock:
1616         spin_unlock(&fs_info->qgroup_lock);
1617 out:
1618         mutex_unlock(&fs_info->qgroup_ioctl_lock);
1619         return ret;
1620 }
1621
1622 /*
1623  * reserve some space for a qgroup and all its parents. The reservation takes
1624  * place with start_transaction or dealloc_reserve, similar to ENOSPC
1625  * accounting. If not enough space is available, EDQUOT is returned.
1626  * We assume that the requested space is new for all qgroups.
1627  */
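/*
 * Hedged usage sketch with a hypothetical caller (do_some_allocation() is
 * not a real helper in this file): a user of this interface would reserve
 * before doing the work and release with btrfs_qgroup_free() if the work
 * fails, along the lines of
 *
 *	ret = btrfs_qgroup_reserve(root, num_bytes);
 *	if (ret)
 *		return ret;                        (-EDQUOT or -ENOMEM)
 *	ret = do_some_allocation();                (hypothetical helper)
 *	if (ret)
 *		btrfs_qgroup_free(root, num_bytes);
 *	return ret;
 *
 * Non-fs trees and num_bytes == 0 are accepted and return 0 without
 * reserving anything, see the early-out checks below.
 */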
1628 int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
1629 {
1630         struct btrfs_root *quota_root;
1631         struct btrfs_qgroup *qgroup;
1632         struct btrfs_fs_info *fs_info = root->fs_info;
1633         u64 ref_root = root->root_key.objectid;
1634         int ret = 0;
1635         struct ulist *ulist = NULL;
1636         struct ulist_node *unode;
1637         struct ulist_iterator uiter;
1638
1639         if (!is_fstree(ref_root))
1640                 return 0;
1641
1642         if (num_bytes == 0)
1643                 return 0;
1644
1645         spin_lock(&fs_info->qgroup_lock);
1646         quota_root = fs_info->quota_root;
1647         if (!quota_root)
1648                 goto out;
1649
1650         qgroup = find_qgroup_rb(fs_info, ref_root);
1651         if (!qgroup)
1652                 goto out;
1653
1654         /*
1655          * in a first step, check all affected qgroups to see whether any
1656          * of their limits would be exceeded
1657          */
1658         ulist = ulist_alloc(GFP_ATOMIC);
1659         if (!ulist) {
1660                 ret = -ENOMEM;
1661                 goto out;
1662         }
1663         ret = ulist_add(ulist, qgroup->qgroupid,
1664                         (uintptr_t)qgroup, GFP_ATOMIC);
1665         if (ret < 0)
1666                 goto out;
1667         ULIST_ITER_INIT(&uiter);
1668         while ((unode = ulist_next(ulist, &uiter))) {
1669                 struct btrfs_qgroup *qg;
1670                 struct btrfs_qgroup_list *glist;
1671
1672                 qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1673
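                /*
                 * Worked example for the checks below, with illustrative
                 * numbers: if max_rfer = 1048576 (1 MiB), rfer = 786432 and
                 * reserved = 262144, the sum is already 1048576, so any
                 * num_bytes > 0 exceeds max_rfer and we bail out with
                 * -EDQUOT. The exclusive check works the same way against
                 * max_excl.
                 */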
1674                 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
1675                     qg->reserved + (s64)qg->rfer + num_bytes >
1676                     qg->max_rfer) {
1677                         ret = -EDQUOT;
1678                         goto out;
1679                 }
1680
1681                 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
1682                     qg->reserved + (s64)qg->excl + num_bytes >
1683                     qg->max_excl) {
1684                         ret = -EDQUOT;
1685                         goto out;
1686                 }
1687
1688                 list_for_each_entry(glist, &qg->groups, next_group) {
1689                         ret = ulist_add(ulist, glist->group->qgroupid,
1690                                         (uintptr_t)glist->group, GFP_ATOMIC);
1691                         if (ret < 0)
1692                                 goto out;
1693                 }
1694         }
1695         ret = 0;
1696         /*
1697          * no limits exceeded, now record the reservation in all affected qgroups
1698          */
1699         ULIST_ITER_INIT(&uiter);
1700         while ((unode = ulist_next(ulist, &uiter))) {
1701                 struct btrfs_qgroup *qg;
1702
1703                 qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1704
1705                 qg->reserved += num_bytes;
1706         }
1707
1708 out:
1709         spin_unlock(&fs_info->qgroup_lock);
1710         ulist_free(ulist);
1711
1712         return ret;
1713 }
1714
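/*
 * Release a reservation previously taken with btrfs_qgroup_reserve(): the
 * given number of bytes is subtracted from the reserved counter of the
 * qgroup that owns this root and of every qgroup it is (transitively) a
 * member of. Does nothing if quotas are disabled or the qgroup does not
 * exist.
 */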
1715 void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
1716 {
1717         struct btrfs_root *quota_root;
1718         struct btrfs_qgroup *qgroup;
1719         struct btrfs_fs_info *fs_info = root->fs_info;
1720         struct ulist *ulist = NULL;
1721         struct ulist_node *unode;
1722         struct ulist_iterator uiter;
1723         u64 ref_root = root->root_key.objectid;
1724         int ret = 0;
1725
1726         if (!is_fstree(ref_root))
1727                 return;
1728
1729         if (num_bytes == 0)
1730                 return;
1731
1732         spin_lock(&fs_info->qgroup_lock);
1733
1734         quota_root = fs_info->quota_root;
1735         if (!quota_root)
1736                 goto out;
1737
1738         qgroup = find_qgroup_rb(fs_info, ref_root);
1739         if (!qgroup)
1740                 goto out;
1741
1742         ulist = ulist_alloc(GFP_ATOMIC);
1743         if (!ulist) {
1744                 btrfs_std_error(fs_info, -ENOMEM);
1745                 goto out;
1746         }
1747         ret = ulist_add(ulist, qgroup->qgroupid,
1748                         (uintptr_t)qgroup, GFP_ATOMIC);
1749         if (ret < 0)
1750                 goto out;
1751         ULIST_ITER_INIT(&uiter);
1752         while ((unode = ulist_next(ulist, &uiter))) {
1753                 struct btrfs_qgroup *qg;
1754                 struct btrfs_qgroup_list *glist;
1755
1756                 qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1757
1758                 qg->reserved -= num_bytes;
1759
1760                 list_for_each_entry(glist, &qg->groups, next_group) {
1761                         ret = ulist_add(ulist, glist->group->qgroupid,
1762                                         (uintptr_t)glist->group, GFP_ATOMIC);
1763                         if (ret < 0)
1764                                 goto out;
1765                 }
1766         }
1767
1768 out:
1769         spin_unlock(&fs_info->qgroup_lock);
1770         ulist_free(ulist);
1771 }
1772
1773 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
1774 {
1775         if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
1776                 return;
1777         pr_err("btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x\n",
1778                 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
1779                 (u32)(trans->delayed_ref_elem.seq >> 32),
1780                 (u32)trans->delayed_ref_elem.seq);
1781         BUG();
1782 }