Btrfs: fs_info variable for join_transaction
fs/btrfs/transaction.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include "ctree.h"
26 #include "disk-io.h"
27 #include "transaction.h"
28 #include "locking.h"
29 #include "tree-log.h"
30 #include "inode-map.h"
31
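/*
 * Radix tree tag used on fs_info->fs_roots_radix: set when a root is first
 * recorded in the running transaction (record_root_in_trans) and cleared
 * again when the root is written out at commit time (commit_fs_roots).
 */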
32 #define BTRFS_ROOT_TRANS_TAG 0
33
34 void put_transaction(struct btrfs_transaction *transaction)
35 {
36         WARN_ON(atomic_read(&transaction->use_count) == 0);
37         if (atomic_dec_and_test(&transaction->use_count)) {
38                 BUG_ON(!list_empty(&transaction->list));
39                 WARN_ON(transaction->delayed_refs.root.rb_node);
40                 WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
41                 memset(transaction, 0, sizeof(*transaction));
42                 kmem_cache_free(btrfs_transaction_cachep, transaction);
43         }
44 }
45
46 static noinline void switch_commit_root(struct btrfs_root *root)
47 {
48         free_extent_buffer(root->commit_root);
49         root->commit_root = btrfs_root_node(root);
50 }
51
52 /*
53  * either allocate a new transaction or hop into the existing one
54  */
55 static noinline int join_transaction(struct btrfs_root *root, int nofail)
56 {
57         struct btrfs_transaction *cur_trans;
58         struct btrfs_fs_info *fs_info = root->fs_info;
59
60         spin_lock(&fs_info->trans_lock);
61 loop:
62         /* The file system has been taken offline. No new transactions. */
63         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
64                 spin_unlock(&fs_info->trans_lock);
65                 return -EROFS;
66         }
67
68         if (fs_info->trans_no_join) {
69                 if (!nofail) {
70                         spin_unlock(&fs_info->trans_lock);
71                         return -EBUSY;
72                 }
73         }
74
75         cur_trans = fs_info->running_transaction;
76         if (cur_trans) {
77                 if (cur_trans->aborted) {
78                         spin_unlock(&fs_info->trans_lock);
79                         return cur_trans->aborted;
80                 }
81                 atomic_inc(&cur_trans->use_count);
82                 atomic_inc(&cur_trans->num_writers);
83                 cur_trans->num_joined++;
84                 spin_unlock(&fs_info->trans_lock);
85                 return 0;
86         }
87         spin_unlock(&fs_info->trans_lock);
88
89         cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
90         if (!cur_trans)
91                 return -ENOMEM;
92
93         spin_lock(&fs_info->trans_lock);
94         if (fs_info->running_transaction) {
95                 /*
96                  * someone started a transaction after we unlocked.  Make sure
97                  * to redo the trans_no_join checks above
98                  */
99                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
100                 cur_trans = fs_info->running_transaction;
101                 goto loop;
102         }
103
104         atomic_set(&cur_trans->num_writers, 1);
105         cur_trans->num_joined = 0;
106         init_waitqueue_head(&cur_trans->writer_wait);
107         init_waitqueue_head(&cur_trans->commit_wait);
108         cur_trans->in_commit = 0;
109         cur_trans->blocked = 0;
110         /*
111          * One for this trans handle, one so it will live on until we
112          * commit the transaction.
113          */
114         atomic_set(&cur_trans->use_count, 2);
115         cur_trans->commit_done = 0;
116         cur_trans->start_time = get_seconds();
117
118         cur_trans->delayed_refs.root = RB_ROOT;
119         cur_trans->delayed_refs.num_entries = 0;
120         cur_trans->delayed_refs.num_heads_ready = 0;
121         cur_trans->delayed_refs.num_heads = 0;
122         cur_trans->delayed_refs.flushing = 0;
123         cur_trans->delayed_refs.run_delayed_start = 0;
124         cur_trans->delayed_refs.seq = 1;
125         init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
126         spin_lock_init(&cur_trans->commit_lock);
127         spin_lock_init(&cur_trans->delayed_refs.lock);
128         INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
129
130         INIT_LIST_HEAD(&cur_trans->pending_snapshots);
131         list_add_tail(&cur_trans->list, &fs_info->trans_list);
132         extent_io_tree_init(&cur_trans->dirty_pages,
133                              fs_info->btree_inode->i_mapping);
134         fs_info->generation++;
135         cur_trans->transid = fs_info->generation;
136         fs_info->running_transaction = cur_trans;
137         cur_trans->aborted = 0;
138         spin_unlock(&fs_info->trans_lock);
139
140         return 0;
141 }
142
143 /*
144  * this does all the record keeping required to make sure that a reference
145  * counted root is properly recorded in a given transaction.  This is required
146  * to make sure the old root from before we joined the transaction is deleted
147  * when the transaction commits
148  */
149 static int record_root_in_trans(struct btrfs_trans_handle *trans,
150                                struct btrfs_root *root)
151 {
152         if (root->ref_cows && root->last_trans < trans->transid) {
153                 WARN_ON(root == root->fs_info->extent_root);
154                 WARN_ON(root->commit_root != root->node);
155
156                 /*
157                  * see below for in_trans_setup usage rules
158                  * we have the reloc mutex held now, so there
159                  * is only one writer in this function
160                  */
161                 root->in_trans_setup = 1;
162
163                 /* make sure readers find in_trans_setup before
164                  * they find our root->last_trans update
165                  */
166                 smp_wmb();
167
168                 spin_lock(&root->fs_info->fs_roots_radix_lock);
169                 if (root->last_trans == trans->transid) {
170                         spin_unlock(&root->fs_info->fs_roots_radix_lock);
171                         return 0;
172                 }
173                 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
174                            (unsigned long)root->root_key.objectid,
175                            BTRFS_ROOT_TRANS_TAG);
176                 spin_unlock(&root->fs_info->fs_roots_radix_lock);
177                 root->last_trans = trans->transid;
178
179                 /* this is pretty tricky.  We don't want to
180                  * take the relocation lock in btrfs_record_root_in_trans
181                  * unless we're really doing the first setup for this root in
182                  * this transaction.
183                  *
184                  * Normally we'd use root->last_trans as a flag to decide
185                  * if we want to take the expensive mutex.
186                  *
187                  * But, we have to set root->last_trans before we
188                  * init the relocation root, otherwise, we trip over warnings
189                  * in ctree.c.  The solution used here is to flag ourselves
190                  * with root->in_trans_setup.  When this is 1, we're still
191                  * fixing up the reloc trees and everyone must wait.
192                  *
193                  * When this is zero, they can trust root->last_trans and fly
194                  * through btrfs_record_root_in_trans without having to take the
195                  * lock.  smp_wmb() makes sure that all the writes above are
196                  * done before we pop in the zero below
197                  */
198                 btrfs_init_reloc_root(trans, root);
199                 smp_wmb();
200                 root->in_trans_setup = 0;
201         }
202         return 0;
203 }
204
205
206 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
207                                struct btrfs_root *root)
208 {
209         if (!root->ref_cows)
210                 return 0;
211
212         /*
213          * see record_root_in_trans for comments about in_trans_setup usage
214          * and barriers
215          */
216         smp_rmb();
217         if (root->last_trans == trans->transid &&
218             !root->in_trans_setup)
219                 return 0;
220
221         mutex_lock(&root->fs_info->reloc_mutex);
222         record_root_in_trans(trans, root);
223         mutex_unlock(&root->fs_info->reloc_mutex);
224
225         return 0;
226 }
227
228 /* wait for commit against the current transaction to become unblocked
229  * when this is done, it is safe to start a new transaction, but the current
230  * transaction might not be fully on disk.
231  */
232 static void wait_current_trans(struct btrfs_root *root)
233 {
234         struct btrfs_transaction *cur_trans;
235
236         spin_lock(&root->fs_info->trans_lock);
237         cur_trans = root->fs_info->running_transaction;
238         if (cur_trans && cur_trans->blocked) {
239                 atomic_inc(&cur_trans->use_count);
240                 spin_unlock(&root->fs_info->trans_lock);
241
242                 wait_event(root->fs_info->transaction_wait,
243                            !cur_trans->blocked);
244                 put_transaction(cur_trans);
245         } else {
246                 spin_unlock(&root->fs_info->trans_lock);
247         }
248 }
249
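/*
 * Transaction start types, as interpreted by start_transaction() and
 * may_wait_transaction() below:
 *
 *  TRANS_START       - reserves metadata space for num_items and may wait
 *                      for a blocked commit before joining.
 *  TRANS_JOIN        - joins the running transaction without reserving space.
 *  TRANS_USERSPACE   - ioctl-started transaction; always willing to wait for
 *                      the current transaction to unblock.
 *  TRANS_JOIN_NOLOCK - like TRANS_JOIN, but passes nofail to
 *                      join_transaction() so it can join even while
 *                      trans_no_join is set.
 */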
250 enum btrfs_trans_type {
251         TRANS_START,
252         TRANS_JOIN,
253         TRANS_USERSPACE,
254         TRANS_JOIN_NOLOCK,
255 };
256
257 static int may_wait_transaction(struct btrfs_root *root, int type)
258 {
259         if (root->fs_info->log_root_recovering)
260                 return 0;
261
262         if (type == TRANS_USERSPACE)
263                 return 1;
264
265         if (type == TRANS_START &&
266             !atomic_read(&root->fs_info->open_ioctl_trans))
267                 return 1;
268
269         return 0;
270 }
271
272 static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
273                                                     u64 num_items, int type)
274 {
275         struct btrfs_trans_handle *h;
276         struct btrfs_transaction *cur_trans;
277         u64 num_bytes = 0;
278         int ret;
279
280         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
281                 return ERR_PTR(-EROFS);
282
283         if (current->journal_info) {
284                 WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
285                 h = current->journal_info;
286                 h->use_count++;
287                 h->orig_rsv = h->block_rsv;
288                 h->block_rsv = NULL;
289                 goto got_it;
290         }
291
292         /*
293          * Do the reservation before we join the transaction so we can do all
294          * the appropriate flushing if need be.
295          */
296         if (num_items > 0 && root != root->fs_info->chunk_root) {
297                 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
298                 ret = btrfs_block_rsv_add(root,
299                                           &root->fs_info->trans_block_rsv,
300                                           num_bytes);
301                 if (ret)
302                         return ERR_PTR(ret);
303         }
304 again:
305         h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
306         if (!h)
307                 return ERR_PTR(-ENOMEM);
308
309         if (may_wait_transaction(root, type))
310                 wait_current_trans(root);
311
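        /*
         * join_transaction() returns -EBUSY while trans_no_join is set,
         * i.e. while a commit is keeping new writers out; wait for the
         * running transaction to unblock and then retry the join.
         */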
312         do {
313                 ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
314                 if (ret == -EBUSY)
315                         wait_current_trans(root);
316         } while (ret == -EBUSY);
317
318         if (ret < 0) {
319                 kmem_cache_free(btrfs_trans_handle_cachep, h);
320                 return ERR_PTR(ret);
321         }
322
323         cur_trans = root->fs_info->running_transaction;
324
325         h->transid = cur_trans->transid;
326         h->transaction = cur_trans;
327         h->blocks_used = 0;
328         h->bytes_reserved = 0;
329         h->delayed_ref_updates = 0;
330         h->use_count = 1;
331         h->block_rsv = NULL;
332         h->orig_rsv = NULL;
333         h->aborted = 0;
334
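        /*
         * If the transaction we just joined has already been blocked for
         * commit, finish that commit (which also releases our handle) and
         * start over with a fresh transaction.
         */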
335         smp_mb();
336         if (cur_trans->blocked && may_wait_transaction(root, type)) {
337                 btrfs_commit_transaction(h, root);
338                 goto again;
339         }
340
341         if (num_bytes) {
342                 trace_btrfs_space_reservation(root->fs_info, "transaction",
343                                               h->transid, num_bytes, 1);
344                 h->block_rsv = &root->fs_info->trans_block_rsv;
345                 h->bytes_reserved = num_bytes;
346         }
347
348 got_it:
349         btrfs_record_root_in_trans(h, root);
350
351         if (!current->journal_info && type != TRANS_USERSPACE)
352                 current->journal_info = h;
353         return h;
354 }
355
356 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
357                                                    int num_items)
358 {
359         return start_transaction(root, num_items, TRANS_START);
360 }
361 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
362 {
363         return start_transaction(root, 0, TRANS_JOIN);
364 }
365
366 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
367 {
368         return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
369 }
370
371 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
372 {
373         return start_transaction(root, 0, TRANS_USERSPACE);
374 }
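
/*
 * A typical caller of the helpers above looks roughly like this
 * (illustrative sketch only, not code from this file):
 *
 *      trans = btrfs_start_transaction(root, 1);
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      ...make the metadata changes covered by the reserved item...
 *      return btrfs_end_transaction(trans, root);
 */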
375
376 /* wait for a transaction commit to be fully complete */
377 static noinline void wait_for_commit(struct btrfs_root *root,
378                                     struct btrfs_transaction *commit)
379 {
380         wait_event(commit->commit_wait, commit->commit_done);
381 }
382
383 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
384 {
385         struct btrfs_transaction *cur_trans = NULL, *t;
386         int ret;
387
388         ret = 0;
389         if (transid) {
390                 if (transid <= root->fs_info->last_trans_committed)
391                         goto out;
392
393                 /* find specified transaction */
394                 spin_lock(&root->fs_info->trans_lock);
395                 list_for_each_entry(t, &root->fs_info->trans_list, list) {
396                         if (t->transid == transid) {
397                                 cur_trans = t;
398                                 atomic_inc(&cur_trans->use_count);
399                                 break;
400                         }
401                         if (t->transid > transid)
402                                 break;
403                 }
404                 spin_unlock(&root->fs_info->trans_lock);
405                 ret = -EINVAL;
406                 if (!cur_trans)
407                         goto out;  /* bad transid */
408         } else {
409                 /* find newest transaction that is committing | committed */
410                 spin_lock(&root->fs_info->trans_lock);
411                 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
412                                             list) {
413                         if (t->in_commit) {
414                                 if (t->commit_done)
415                                         break;
416                                 cur_trans = t;
417                                 atomic_inc(&cur_trans->use_count);
418                                 break;
419                         }
420                 }
421                 spin_unlock(&root->fs_info->trans_lock);
422                 if (!cur_trans)
423                         goto out;  /* nothing committing|committed */
424         }
425
426         wait_for_commit(root, cur_trans);
427
428         put_transaction(cur_trans);
429         ret = 0;
430 out:
431         return ret;
432 }
433
434 void btrfs_throttle(struct btrfs_root *root)
435 {
436         if (!atomic_read(&root->fs_info->open_ioctl_trans))
437                 wait_current_trans(root);
438 }
439
440 static int should_end_transaction(struct btrfs_trans_handle *trans,
441                                   struct btrfs_root *root)
442 {
443         int ret;
444
445         ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
446         return ret ? 1 : 0;
447 }
448
449 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
450                                  struct btrfs_root *root)
451 {
452         struct btrfs_transaction *cur_trans = trans->transaction;
453         struct btrfs_block_rsv *rsv = trans->block_rsv;
454         int updates;
455         int err;
456
457         smp_mb();
458         if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
459                 return 1;
460
461         /*
462          * We need to do this in case we're deleting csums so the global block
463          * rsv get's used instead of the csum block rsv.
464          */
465         trans->block_rsv = NULL;
466
467         updates = trans->delayed_ref_updates;
468         trans->delayed_ref_updates = 0;
469         if (updates) {
470                 err = btrfs_run_delayed_refs(trans, root, updates);
471                 if (err) /* Error code will also eval true */
472                         return err;
473         }
474
475         trans->block_rsv = rsv;
476
477         return should_end_transaction(trans, root);
478 }
479
480 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
481                           struct btrfs_root *root, int throttle, int lock)
482 {
483         struct btrfs_transaction *cur_trans = trans->transaction;
484         struct btrfs_fs_info *info = root->fs_info;
485         int count = 0;
486         int err = 0;
487
488         if (--trans->use_count) {
489                 trans->block_rsv = trans->orig_rsv;
490                 return 0;
491         }
492
493         btrfs_trans_release_metadata(trans, root);
494         trans->block_rsv = NULL;
495         while (count < 2) {
496                 unsigned long cur = trans->delayed_ref_updates;
497                 trans->delayed_ref_updates = 0;
498                 if (cur &&
499                     trans->transaction->delayed_refs.num_heads_ready > 64) {
500                         trans->delayed_ref_updates = 0;
501                         btrfs_run_delayed_refs(trans, root, cur);
502                 } else {
503                         break;
504                 }
505                 count++;
506         }
507
508         if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
509             should_end_transaction(trans, root)) {
510                 trans->transaction->blocked = 1;
511                 smp_wmb();
512         }
513
514         if (lock && cur_trans->blocked && !cur_trans->in_commit) {
515                 if (throttle) {
516                         /*
517                          * We may race with somebody else here so end up having
518                          * to call end_transaction on ourselves again, so inc
519                          * our use_count.
520                          */
521                         trans->use_count++;
522                         return btrfs_commit_transaction(trans, root);
523                 } else {
524                         wake_up_process(info->transaction_kthread);
525                 }
526         }
527
528         WARN_ON(cur_trans != info->running_transaction);
529         WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
530         atomic_dec(&cur_trans->num_writers);
531
532         smp_mb();
533         if (waitqueue_active(&cur_trans->writer_wait))
534                 wake_up(&cur_trans->writer_wait);
535         put_transaction(cur_trans);
536
537         if (current->journal_info == trans)
538                 current->journal_info = NULL;
539
540         if (throttle)
541                 btrfs_run_delayed_iputs(root);
542
543         if (trans->aborted ||
544             root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
545                 err = -EIO;
546         }
547
548         memset(trans, 0, sizeof(*trans));
549         kmem_cache_free(btrfs_trans_handle_cachep, trans);
550         return err;
551 }
552
553 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
554                           struct btrfs_root *root)
555 {
556         int ret;
557
558         ret = __btrfs_end_transaction(trans, root, 0, 1);
559         if (ret)
560                 return ret;
561         return 0;
562 }
563
564 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
565                                    struct btrfs_root *root)
566 {
567         int ret;
568
569         ret = __btrfs_end_transaction(trans, root, 1, 1);
570         if (ret)
571                 return ret;
572         return 0;
573 }
574
575 int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
576                                  struct btrfs_root *root)
577 {
578         int ret;
579
580         ret = __btrfs_end_transaction(trans, root, 0, 0);
581         if (ret)
582                 return ret;
583         return 0;
584 }
585
586 int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
587                                 struct btrfs_root *root)
588 {
589         return __btrfs_end_transaction(trans, root, 1, 1);
590 }
591
592 /*
593  * when btree blocks are allocated, they have some corresponding bits set for
594  * them in one of two extent_io trees.  This is used to make sure all of
595  * those extents are sent to disk but does not wait on them
596  */
597 int btrfs_write_marked_extents(struct btrfs_root *root,
598                                struct extent_io_tree *dirty_pages, int mark)
599 {
600         int err = 0;
601         int werr = 0;
602         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
603         u64 start = 0;
604         u64 end;
605
606         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
607                                       mark)) {
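                /*
                 * Re-tag the range as EXTENT_NEED_WAIT so that
                 * btrfs_wait_marked_extents() knows which ranges to wait on
                 * and clear once writeback has been started below.
                 */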
608                 convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
609                                    GFP_NOFS);
610                 err = filemap_fdatawrite_range(mapping, start, end);
611                 if (err)
612                         werr = err;
613                 cond_resched();
614                 start = end + 1;
615         }
616         if (err)
617                 werr = err;
618         return werr;
619 }
620
621 /*
622  * when btree blocks are allocated, they have some corresponding bits set for
623  * them in one of two extent_io trees.  This is used to make sure all of
624  * those extents are on disk for transaction or log commit.  We wait
625  * on all the pages and clear them from the dirty pages state tree
626  */
627 int btrfs_wait_marked_extents(struct btrfs_root *root,
628                               struct extent_io_tree *dirty_pages, int mark)
629 {
630         int err = 0;
631         int werr = 0;
632         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
633         u64 start = 0;
634         u64 end;
635
636         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
637                                       EXTENT_NEED_WAIT)) {
638                 clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
639                 err = filemap_fdatawait_range(mapping, start, end);
640                 if (err)
641                         werr = err;
642                 cond_resched();
643                 start = end + 1;
644         }
645         if (err)
646                 werr = err;
647         return werr;
648 }
649
650 /*
651  * when btree blocks are allocated, they have some corresponding bits set for
652  * them in one of two extent_io trees.  This is used to make sure all of
653  * those extents are on disk for transaction or log commit
654  */
655 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
656                                 struct extent_io_tree *dirty_pages, int mark)
657 {
658         int ret;
659         int ret2;
660
661         ret = btrfs_write_marked_extents(root, dirty_pages, mark);
662         ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
663
664         if (ret)
665                 return ret;
666         if (ret2)
667                 return ret2;
668         return 0;
669 }
670
671 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
672                                      struct btrfs_root *root)
673 {
674         if (!trans || !trans->transaction) {
675                 struct inode *btree_inode;
676                 btree_inode = root->fs_info->btree_inode;
677                 return filemap_write_and_wait(btree_inode->i_mapping);
678         }
679         return btrfs_write_and_wait_marked_extents(root,
680                                            &trans->transaction->dirty_pages,
681                                            EXTENT_DIRTY);
682 }
683
684 /*
685  * this is used to update the root pointer in the tree of tree roots.
686  *
687  * But, in the case of the extent allocation tree, updating the root
688  * pointer may allocate blocks which may change the root of the extent
689  * allocation tree.
690  *
691  * So, this loops and repeats and makes sure the cowonly root didn't
692  * change while the root pointer was being updated in the metadata.
693  */
694 static int update_cowonly_root(struct btrfs_trans_handle *trans,
695                                struct btrfs_root *root)
696 {
697         int ret;
698         u64 old_root_bytenr;
699         u64 old_root_used;
700         struct btrfs_root *tree_root = root->fs_info->tree_root;
701
702         old_root_used = btrfs_root_used(&root->root_item);
703         btrfs_write_dirty_block_groups(trans, root);
704
705         while (1) {
706                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
707                 if (old_root_bytenr == root->node->start &&
708                     old_root_used == btrfs_root_used(&root->root_item))
709                         break;
710
711                 btrfs_set_root_node(&root->root_item, root->node);
712                 ret = btrfs_update_root(trans, tree_root,
713                                         &root->root_key,
714                                         &root->root_item);
715                 if (ret)
716                         return ret;
717
718                 old_root_used = btrfs_root_used(&root->root_item);
719                 ret = btrfs_write_dirty_block_groups(trans, root);
720                 if (ret)
721                         return ret;
722         }
723
724         if (root != root->fs_info->extent_root)
725                 switch_commit_root(root);
726
727         return 0;
728 }
729
730 /*
731  * update all the cowonly tree roots on disk
732  *
733  * The error handling in this function may not be obvious. Any of the
734  * failures will cause the file system to go offline. We still need
735  * to clean up the delayed refs.
736  */
737 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
738                                          struct btrfs_root *root)
739 {
740         struct btrfs_fs_info *fs_info = root->fs_info;
741         struct list_head *next;
742         struct extent_buffer *eb;
743         int ret;
744
745         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
746         if (ret)
747                 return ret;
748
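        /*
         * Make sure the root node of the tree of tree roots is COWed in this
         * transaction before we start updating the cowonly root items below.
         */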
749         eb = btrfs_lock_root_node(fs_info->tree_root);
750         ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
751                               0, &eb);
752         btrfs_tree_unlock(eb);
753         free_extent_buffer(eb);
754
755         if (ret)
756                 return ret;
757
758         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
759         if (ret)
760                 return ret;
761
762         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
763                 next = fs_info->dirty_cowonly_roots.next;
764                 list_del_init(next);
765                 root = list_entry(next, struct btrfs_root, dirty_list);
766
767                 ret = update_cowonly_root(trans, root);
768                 if (ret)
769                         return ret;
770         }
771
772         down_write(&fs_info->extent_commit_sem);
773         switch_commit_root(fs_info->extent_root);
774         up_write(&fs_info->extent_commit_sem);
775
776         return 0;
777 }
778
779 /*
780  * dead roots are old snapshots that need to be deleted.  This allocates
781  * a dirty root struct and adds it into the list of dead roots that need to
782  * be deleted
783  */
784 int btrfs_add_dead_root(struct btrfs_root *root)
785 {
786         spin_lock(&root->fs_info->trans_lock);
787         list_add(&root->root_list, &root->fs_info->dead_roots);
788         spin_unlock(&root->fs_info->trans_lock);
789         return 0;
790 }
791
792 /*
793  * write out all the dirty fs roots (subvolumes) changed in this transaction
794  */
795 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
796                                     struct btrfs_root *root)
797 {
798         struct btrfs_root *gang[8];
799         struct btrfs_fs_info *fs_info = root->fs_info;
800         int i;
801         int ret;
802         int err = 0;
803
804         spin_lock(&fs_info->fs_roots_radix_lock);
805         while (1) {
806                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
807                                                  (void **)gang, 0,
808                                                  ARRAY_SIZE(gang),
809                                                  BTRFS_ROOT_TRANS_TAG);
810                 if (ret == 0)
811                         break;
812                 for (i = 0; i < ret; i++) {
813                         root = gang[i];
814                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
815                                         (unsigned long)root->root_key.objectid,
816                                         BTRFS_ROOT_TRANS_TAG);
817                         spin_unlock(&fs_info->fs_roots_radix_lock);
818
819                         btrfs_free_log(trans, root);
820                         btrfs_update_reloc_root(trans, root);
821                         btrfs_orphan_commit_root(trans, root);
822
823                         btrfs_save_ino_cache(root, trans);
824
825                         /* see comments in should_cow_block() */
826                         root->force_cow = 0;
827                         smp_wmb();
828
829                         if (root->commit_root != root->node) {
830                                 mutex_lock(&root->fs_commit_mutex);
831                                 switch_commit_root(root);
832                                 btrfs_unpin_free_ino(root);
833                                 mutex_unlock(&root->fs_commit_mutex);
834
835                                 btrfs_set_root_node(&root->root_item,
836                                                     root->node);
837                         }
838
839                         err = btrfs_update_root(trans, fs_info->tree_root,
840                                                 &root->root_key,
841                                                 &root->root_item);
842                         spin_lock(&fs_info->fs_roots_radix_lock);
843                         if (err)
844                                 break;
845                 }
846         }
847         spin_unlock(&fs_info->fs_roots_radix_lock);
848         return err;
849 }
850
851 /*
852  * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
853  * otherwise every leaf in the btree is read and defragged.
854  */
855 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
856 {
857         struct btrfs_fs_info *info = root->fs_info;
858         struct btrfs_trans_handle *trans;
859         int ret;
860         unsigned long nr;
861
862         if (xchg(&root->defrag_running, 1))
863                 return 0;
864
865         while (1) {
866                 trans = btrfs_start_transaction(root, 0);
867                 if (IS_ERR(trans))
868                         return PTR_ERR(trans);
869
870                 ret = btrfs_defrag_leaves(trans, root, cacheonly);
871
872                 nr = trans->blocks_used;
873                 btrfs_end_transaction(trans, root);
874                 btrfs_btree_balance_dirty(info->tree_root, nr);
875                 cond_resched();
876
877                 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
878                         break;
879         }
880         root->defrag_running = 0;
881         return ret;
882 }
883
884 /*
885  * new snapshots need to be created at a very specific time in the
886  * transaction commit.  This does the actual creation
887  */
888 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
889                                    struct btrfs_fs_info *fs_info,
890                                    struct btrfs_pending_snapshot *pending)
891 {
892         struct btrfs_key key;
893         struct btrfs_root_item *new_root_item;
894         struct btrfs_root *tree_root = fs_info->tree_root;
895         struct btrfs_root *root = pending->root;
896         struct btrfs_root *parent_root;
897         struct btrfs_block_rsv *rsv;
898         struct inode *parent_inode;
899         struct dentry *parent;
900         struct dentry *dentry;
901         struct extent_buffer *tmp;
902         struct extent_buffer *old;
903         int ret;
904         u64 to_reserve = 0;
905         u64 index = 0;
906         u64 objectid;
907         u64 root_flags;
908
909         rsv = trans->block_rsv;
910
911         new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
912         if (!new_root_item) {
913                 ret = pending->error = -ENOMEM;
914                 goto fail;
915         }
916
917         ret = btrfs_find_free_objectid(tree_root, &objectid);
918         if (ret) {
919                 pending->error = ret;
920                 goto fail;
921         }
922
923         btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
924
925         if (to_reserve > 0) {
926                 ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
927                                                   to_reserve);
928                 if (ret) {
929                         pending->error = ret;
930                         goto fail;
931                 }
932         }
933
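        /*
         * Key for the new root item: objectid is the id of the new
         * subvolume; offset is filled in with the creation transid right
         * before btrfs_insert_root() further down.
         */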
934         key.objectid = objectid;
935         key.offset = (u64)-1;
936         key.type = BTRFS_ROOT_ITEM_KEY;
937
938         trans->block_rsv = &pending->block_rsv;
939
940         dentry = pending->dentry;
941         parent = dget_parent(dentry);
942         parent_inode = parent->d_inode;
943         parent_root = BTRFS_I(parent_inode)->root;
944         record_root_in_trans(trans, parent_root);
945
946         /*
947          * insert the directory item
948          */
949         ret = btrfs_set_inode_index(parent_inode, &index);
950         BUG_ON(ret); /* -ENOMEM */
951         ret = btrfs_insert_dir_item(trans, parent_root,
952                                 dentry->d_name.name, dentry->d_name.len,
953                                 parent_inode, &key,
954                                 BTRFS_FT_DIR, index);
955         if (ret == -EEXIST) {
956                 pending->error = -EEXIST;
957                 dput(parent);
958                 goto fail;
959         } else if (ret) {
960                 goto abort_trans_dput;
961         }
962
963         btrfs_i_size_write(parent_inode, parent_inode->i_size +
964                                          dentry->d_name.len * 2);
965         ret = btrfs_update_inode(trans, parent_root, parent_inode);
966         if (ret)
967                 goto abort_trans_dput;
968
969         /*
970          * pull in the delayed directory update
971          * and the delayed inode item
972          * otherwise we corrupt the FS during
973          * snapshot
974          */
975         ret = btrfs_run_delayed_items(trans, root);
976         if (ret) { /* Transaction aborted */
977                 dput(parent);
978                 goto fail;
979         }
980
981         record_root_in_trans(trans, root);
982         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
983         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
984         btrfs_check_and_init_root_item(new_root_item);
985
986         root_flags = btrfs_root_flags(new_root_item);
987         if (pending->readonly)
988                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
989         else
990                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
991         btrfs_set_root_flags(new_root_item, root_flags);
992
993         old = btrfs_lock_root_node(root);
994         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
995         if (ret) {
996                 btrfs_tree_unlock(old);
997                 free_extent_buffer(old);
998                 goto abort_trans_dput;
999         }
1000
1001         btrfs_set_lock_blocking(old);
1002
1003         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1004         /* clean up in any case */
1005         btrfs_tree_unlock(old);
1006         free_extent_buffer(old);
1007         if (ret)
1008                 goto abort_trans_dput;
1009
1010         /* see comments in should_cow_block() */
1011         root->force_cow = 1;
1012         smp_wmb();
1013
1014         btrfs_set_root_node(new_root_item, tmp);
1015         /* record when the snapshot was created in key.offset */
1016         key.offset = trans->transid;
1017         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1018         btrfs_tree_unlock(tmp);
1019         free_extent_buffer(tmp);
1020         if (ret)
1021                 goto abort_trans_dput;
1022
1023         /*
1024          * insert root back/forward references
1025          */
1026         ret = btrfs_add_root_ref(trans, tree_root, objectid,
1027                                  parent_root->root_key.objectid,
1028                                  btrfs_ino(parent_inode), index,
1029                                  dentry->d_name.name, dentry->d_name.len);
1030         dput(parent);
1031         if (ret)
1032                 goto fail;
1033
1034         key.offset = (u64)-1;
1035         pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
1036         if (IS_ERR(pending->snap)) {
1037                 ret = PTR_ERR(pending->snap);
1038                 goto abort_trans;
1039         }
1040
1041         ret = btrfs_reloc_post_snapshot(trans, pending);
1042         if (ret)
1043                 goto abort_trans;
1044         ret = 0;
1045 fail:
1046         kfree(new_root_item);
1047         trans->block_rsv = rsv;
1048         btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
1049         return ret;
1050
1051 abort_trans_dput:
1052         dput(parent);
1053 abort_trans:
1054         btrfs_abort_transaction(trans, root, ret);
1055         goto fail;
1056 }
1057
1058 /*
1059  * create all the snapshots we've scheduled for creation
1060  */
1061 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1062                                              struct btrfs_fs_info *fs_info)
1063 {
1064         struct btrfs_pending_snapshot *pending;
1065         struct list_head *head = &trans->transaction->pending_snapshots;
1066
1067         list_for_each_entry(pending, head, list)
1068                 create_pending_snapshot(trans, fs_info, pending);
1069         return 0;
1070 }
1071
1072 static void update_super_roots(struct btrfs_root *root)
1073 {
1074         struct btrfs_root_item *root_item;
1075         struct btrfs_super_block *super;
1076
1077         super = root->fs_info->super_copy;
1078
1079         root_item = &root->fs_info->chunk_root->root_item;
1080         super->chunk_root = root_item->bytenr;
1081         super->chunk_root_generation = root_item->generation;
1082         super->chunk_root_level = root_item->level;
1083
1084         root_item = &root->fs_info->tree_root->root_item;
1085         super->root = root_item->bytenr;
1086         super->generation = root_item->generation;
1087         super->root_level = root_item->level;
1088         if (btrfs_test_opt(root, SPACE_CACHE))
1089                 super->cache_generation = root_item->generation;
1090 }
1091
1092 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1093 {
1094         int ret = 0;
1095         spin_lock(&info->trans_lock);
1096         if (info->running_transaction)
1097                 ret = info->running_transaction->in_commit;
1098         spin_unlock(&info->trans_lock);
1099         return ret;
1100 }
1101
1102 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1103 {
1104         int ret = 0;
1105         spin_lock(&info->trans_lock);
1106         if (info->running_transaction)
1107                 ret = info->running_transaction->blocked;
1108         spin_unlock(&info->trans_lock);
1109         return ret;
1110 }
1111
1112 /*
1113  * wait for the current transaction commit to start and block subsequent
1114  * transaction joins
1115  */
1116 static void wait_current_trans_commit_start(struct btrfs_root *root,
1117                                             struct btrfs_transaction *trans)
1118 {
1119         wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
1120 }
1121
1122 /*
1123  * wait for the current transaction to start and then become unblocked.
1124  * caller holds ref.
1125  */
1126 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1127                                          struct btrfs_transaction *trans)
1128 {
1129         wait_event(root->fs_info->transaction_wait,
1130                    trans->commit_done || (trans->in_commit && !trans->blocked));
1131 }
1132
1133 /*
1134  * commit transactions asynchronously. once btrfs_commit_transaction_async
1135  * returns, any subsequent transaction will not be allowed to join.
1136  */
1137 struct btrfs_async_commit {
1138         struct btrfs_trans_handle *newtrans;
1139         struct btrfs_root *root;
1140         struct delayed_work work;
1141 };
1142
1143 static void do_async_commit(struct work_struct *work)
1144 {
1145         struct btrfs_async_commit *ac =
1146                 container_of(work, struct btrfs_async_commit, work.work);
1147
1148         btrfs_commit_transaction(ac->newtrans, ac->root);
1149         kfree(ac);
1150 }
1151
1152 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1153                                    struct btrfs_root *root,
1154                                    int wait_for_unblock)
1155 {
1156         struct btrfs_async_commit *ac;
1157         struct btrfs_transaction *cur_trans;
1158
1159         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1160         if (!ac)
1161                 return -ENOMEM;
1162
1163         INIT_DELAYED_WORK(&ac->work, do_async_commit);
1164         ac->root = root;
1165         ac->newtrans = btrfs_join_transaction(root);
1166         if (IS_ERR(ac->newtrans)) {
1167                 int err = PTR_ERR(ac->newtrans);
1168                 kfree(ac);
1169                 return err;
1170         }
1171
1172         /* take transaction reference */
1173         cur_trans = trans->transaction;
1174         atomic_inc(&cur_trans->use_count);
1175
1176         btrfs_end_transaction(trans, root);
1177         schedule_delayed_work(&ac->work, 0);
1178
1179         /* wait for transaction to start and unblock */
1180         if (wait_for_unblock)
1181                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1182         else
1183                 wait_current_trans_commit_start(root, cur_trans);
1184
1185         if (current->journal_info == trans)
1186                 current->journal_info = NULL;
1187
1188         put_transaction(cur_trans);
1189         return 0;
1190 }
1191
1192
1193 static void cleanup_transaction(struct btrfs_trans_handle *trans,
1194                                 struct btrfs_root *root)
1195 {
1196         struct btrfs_transaction *cur_trans = trans->transaction;
1197
1198         WARN_ON(trans->use_count > 1);
1199
1200         spin_lock(&root->fs_info->trans_lock);
1201         list_del_init(&cur_trans->list);
1202         spin_unlock(&root->fs_info->trans_lock);
1203
1204         btrfs_cleanup_one_transaction(trans->transaction, root);
1205
1206         put_transaction(cur_trans);
1207         put_transaction(cur_trans);
1208
1209         trace_btrfs_transaction_commit(root);
1210
1211         btrfs_scrub_continue(root);
1212
1213         if (current->journal_info == trans)
1214                 current->journal_info = NULL;
1215
1216         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1217 }
1218
1219 /*
1220  * btrfs_transaction state sequence:
1221  *    in_commit = 0, blocked = 0  (initial)
1222  *    in_commit = 1, blocked = 1
1223  *    blocked = 0
1224  *    commit_done = 1
1225  */
1226 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1227                              struct btrfs_root *root)
1228 {
1229         unsigned long joined = 0;
1230         struct btrfs_transaction *cur_trans = trans->transaction;
1231         struct btrfs_transaction *prev_trans = NULL;
1232         DEFINE_WAIT(wait);
1233         int ret = -EIO;
1234         int should_grow = 0;
1235         unsigned long now = get_seconds();
1236         int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
1237
1238         btrfs_run_ordered_operations(root, 0);
1239
1240         btrfs_trans_release_metadata(trans, root);
1241         trans->block_rsv = NULL;
1242
1243         if (cur_trans->aborted)
1244                 goto cleanup_transaction;
1245
1246         /* make a pass through all the delayed refs we have so far
1247          * any running procs may add more while we are here
1248          */
1249         ret = btrfs_run_delayed_refs(trans, root, 0);
1250         if (ret)
1251                 goto cleanup_transaction;
1252
1253         cur_trans = trans->transaction;
1254
1255         /*
1256          * set the flushing flag so procs in this transaction have to
1257          * start sending their work down.
1258          */
1259         cur_trans->delayed_refs.flushing = 1;
1260
1261         ret = btrfs_run_delayed_refs(trans, root, 0);
1262         if (ret)
1263                 goto cleanup_transaction;
1264
1265         spin_lock(&cur_trans->commit_lock);
1266         if (cur_trans->in_commit) {
1267                 spin_unlock(&cur_trans->commit_lock);
1268                 atomic_inc(&cur_trans->use_count);
1269                 ret = btrfs_end_transaction(trans, root);
1270
1271                 wait_for_commit(root, cur_trans);
1272
1273                 put_transaction(cur_trans);
1274
1275                 return ret;
1276         }
1277
1278         trans->transaction->in_commit = 1;
1279         trans->transaction->blocked = 1;
1280         spin_unlock(&cur_trans->commit_lock);
1281         wake_up(&root->fs_info->transaction_blocked_wait);
1282
1283         spin_lock(&root->fs_info->trans_lock);
1284         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1285                 prev_trans = list_entry(cur_trans->list.prev,
1286                                         struct btrfs_transaction, list);
1287                 if (!prev_trans->commit_done) {
1288                         atomic_inc(&prev_trans->use_count);
1289                         spin_unlock(&root->fs_info->trans_lock);
1290
1291                         wait_for_commit(root, prev_trans);
1292
1293                         put_transaction(prev_trans);
1294                 } else {
1295                         spin_unlock(&root->fs_info->trans_lock);
1296                 }
1297         } else {
1298                 spin_unlock(&root->fs_info->trans_lock);
1299         }
1300
1301         if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
1302                 should_grow = 1;
1303
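        /*
         * Loop until we are the only writer attached to this transaction
         * (or, when should_grow is set, until no new writers joined during
         * the last pass), flushing delayed items and, when needed, delalloc
         * and ordered extents along the way.
         */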
1304         do {
1305                 int snap_pending = 0;
1306
1307                 joined = cur_trans->num_joined;
1308                 if (!list_empty(&trans->transaction->pending_snapshots))
1309                         snap_pending = 1;
1310
1311                 WARN_ON(cur_trans != trans->transaction);
1312
1313                 if (flush_on_commit || snap_pending) {
1314                         btrfs_start_delalloc_inodes(root, 1);
1315                         btrfs_wait_ordered_extents(root, 0, 1);
1316                 }
1317
1318                 ret = btrfs_run_delayed_items(trans, root);
1319                 if (ret)
1320                         goto cleanup_transaction;
1321
1322                 /*
1323                  * rename doesn't use btrfs_join_transaction, so once we
1324                  * set the transaction to blocked above, we aren't going
1325                  * to get any new ordered operations.  We can safely run
1326                  * it here and know for sure that nothing new will be added
1327                  * to the list
1328                  */
1329                 btrfs_run_ordered_operations(root, 1);
1330
1331                 prepare_to_wait(&cur_trans->writer_wait, &wait,
1332                                 TASK_UNINTERRUPTIBLE);
1333
1334                 if (atomic_read(&cur_trans->num_writers) > 1)
1335                         schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1336                 else if (should_grow)
1337                         schedule_timeout(1);
1338
1339                 finish_wait(&cur_trans->writer_wait, &wait);
1340         } while (atomic_read(&cur_trans->num_writers) > 1 ||
1341                  (should_grow && cur_trans->num_joined != joined));
1342
1343         /*
1344          * Ok now we need to make sure to block out any other joins while we
1345          * commit the transaction.  We could have started a join before setting
1346          * no_join so make sure to wait for num_writers to == 1 again.
1347          */
1348         spin_lock(&root->fs_info->trans_lock);
1349         root->fs_info->trans_no_join = 1;
1350         spin_unlock(&root->fs_info->trans_lock);
1351         wait_event(cur_trans->writer_wait,
1352                    atomic_read(&cur_trans->num_writers) == 1);
1353
1354         /*
1355          * the reloc mutex makes sure that we stop
1356          * the balancing code from coming in and moving
1357          * extents around in the middle of the commit
1358          */
1359         mutex_lock(&root->fs_info->reloc_mutex);
1360
1361         ret = btrfs_run_delayed_items(trans, root);
1362         if (ret) {
1363                 mutex_unlock(&root->fs_info->reloc_mutex);
1364                 goto cleanup_transaction;
1365         }
1366
1367         ret = create_pending_snapshots(trans, root->fs_info);
1368         if (ret) {
1369                 mutex_unlock(&root->fs_info->reloc_mutex);
1370                 goto cleanup_transaction;
1371         }
1372
1373         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1374         if (ret) {
1375                 mutex_unlock(&root->fs_info->reloc_mutex);
1376                 goto cleanup_transaction;
1377         }
1378
1379         /*
1380          * make sure none of the code above managed to slip in a
1381          * delayed item
1382          */
1383         btrfs_assert_delayed_root_empty(root);
1384
1385         WARN_ON(cur_trans != trans->transaction);
1386
1387         btrfs_scrub_pause(root);
1388         /* commit_cowonly_roots() is responsible for getting the
1389          * various roots consistent with each other.  Every pointer
1390          * in the tree of tree roots has to point to the most up to date
1391          * root for every subvolume and other tree.  So, we have to keep
1392          * the tree logging code from jumping in and changing any
1393          * of the trees.
1394          *
1395          * At this point in the commit, there can't be any tree-log
1396          * writers, but a little lower down we drop the trans mutex
1397          * and let new people in.  By holding the tree_log_mutex
1398          * from now until after the super is written, we avoid races
1399          * with the tree-log code.
1400          */
1401         mutex_lock(&root->fs_info->tree_log_mutex);
1402
1403         ret = commit_fs_roots(trans, root);
1404         if (ret) {
1405                 mutex_unlock(&root->fs_info->tree_log_mutex);
1406                 mutex_unlock(&root->fs_info->reloc_mutex);
1407                 goto cleanup_transaction;
1408         }
1409
1410         /* commit_fs_roots() gets rid of all the tree log roots; it is now
1411          * safe to free the root of the tree of log roots
1412          */
1413         btrfs_free_log_root_tree(trans, root->fs_info);
1414
1415         ret = commit_cowonly_roots(trans, root);
1416         if (ret) {
1417                 mutex_unlock(&root->fs_info->tree_log_mutex);
1418                 mutex_unlock(&root->fs_info->reloc_mutex);
1419                 goto cleanup_transaction;
1420         }
1421
1422         btrfs_prepare_extent_commit(trans, root);
1423
1424         cur_trans = root->fs_info->running_transaction;
1425
1426         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1427                             root->fs_info->tree_root->node);
1428         switch_commit_root(root->fs_info->tree_root);
1429
1430         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1431                             root->fs_info->chunk_root->node);
1432         switch_commit_root(root->fs_info->chunk_root);
1433
1434         update_super_roots(root);
1435
1436         if (!root->fs_info->log_root_recovering) {
1437                 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1438                 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1439         }
1440
1441         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1442                sizeof(*root->fs_info->super_copy));
1443
1444         trans->transaction->blocked = 0;
1445         spin_lock(&root->fs_info->trans_lock);
1446         root->fs_info->running_transaction = NULL;
1447         root->fs_info->trans_no_join = 0;
1448         spin_unlock(&root->fs_info->trans_lock);
1449         mutex_unlock(&root->fs_info->reloc_mutex);
1450
1451         wake_up(&root->fs_info->transaction_wait);
1452
1453         ret = btrfs_write_and_wait_transaction(trans, root);
1454         if (ret) {
1455                 btrfs_error(root->fs_info, ret,
1456                             "Error while writing out transaction.");
1457                 mutex_unlock(&root->fs_info->tree_log_mutex);
1458                 goto cleanup_transaction;
1459         }
1460
1461         ret = write_ctree_super(trans, root, 0);
1462         if (ret) {
1463                 mutex_unlock(&root->fs_info->tree_log_mutex);
1464                 goto cleanup_transaction;
1465         }
1466
1467         /*
1468          * the super is written, we can safely allow the tree-loggers
1469          * to go about their business
1470          */
1471         mutex_unlock(&root->fs_info->tree_log_mutex);
1472
1473         btrfs_finish_extent_commit(trans, root);
1474
1475         cur_trans->commit_done = 1;
1476
1477         root->fs_info->last_trans_committed = cur_trans->transid;
1478
1479         wake_up(&cur_trans->commit_wait);
1480
1481         spin_lock(&root->fs_info->trans_lock);
1482         list_del_init(&cur_trans->list);
1483         spin_unlock(&root->fs_info->trans_lock);
1484
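        /*
         * Drop both remaining references: the one held by this handle and
         * the extra one taken in join_transaction() so the transaction
         * would stay alive until the commit finished.
         */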
1485         put_transaction(cur_trans);
1486         put_transaction(cur_trans);
1487
1488         trace_btrfs_transaction_commit(root);
1489
1490         btrfs_scrub_continue(root);
1491
1492         if (current->journal_info == trans)
1493                 current->journal_info = NULL;
1494
1495         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1496
1497         if (current != root->fs_info->transaction_kthread)
1498                 btrfs_run_delayed_iputs(root);
1499
1500         return ret;
1501
1502 cleanup_transaction:
1503         btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
1504 //      WARN_ON(1);
1505         if (current->journal_info == trans)
1506                 current->journal_info = NULL;
1507         cleanup_transaction(trans, root);
1508
1509         return ret;
1510 }
1511
1512 /*
1513  * interface function to delete all the snapshots we have scheduled for deletion
1514  */
1515 int btrfs_clean_old_snapshots(struct btrfs_root *root)
1516 {
1517         LIST_HEAD(list);
1518         struct btrfs_fs_info *fs_info = root->fs_info;
1519
1520         spin_lock(&fs_info->trans_lock);
1521         list_splice_init(&fs_info->dead_roots, &list);
1522         spin_unlock(&fs_info->trans_lock);
1523
1524         while (!list_empty(&list)) {
1525                 int ret;
1526
1527                 root = list_entry(list.next, struct btrfs_root, root_list);
1528                 list_del(&root->root_list);
1529
1530                 btrfs_kill_all_delayed_nodes(root);
1531
1532                 if (btrfs_header_backref_rev(root->node) <
1533                     BTRFS_MIXED_BACKREF_REV)
1534                         ret = btrfs_drop_snapshot(root, NULL, 0, 0);
1535                 else
1536                         ret = btrfs_drop_snapshot(root, NULL, 1, 0);
1537                 BUG_ON(ret < 0);
1538         }
1539         return 0;
1540 }