Btrfs: take overflow into account in reserving space
[firefly-linux-kernel-4.4.55.git] / fs / btrfs / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include "compat.h"
27 #include "hash.h"
28 #include "ctree.h"
29 #include "disk-io.h"
30 #include "print-tree.h"
31 #include "transaction.h"
32 #include "volumes.h"
33 #include "locking.h"
34 #include "free-space-cache.h"
35
36 /* control flags for do_chunk_alloc's force field
37  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
38  * if we really need one.
39  *
40  * CHUNK_ALLOC_FORCE means it must try to allocate one
41  *
42  * CHUNK_ALLOC_LIMITED means to only try and allocate one
43  * if we have very few chunks already allocated.  This is
44  * used as part of the clustering code to help make sure
45  * we have a good pool of storage to cluster in, without
46  * filling the FS with empty chunks
47  *
48  */
49 enum {
50         CHUNK_ALLOC_NO_FORCE = 0,
51         CHUNK_ALLOC_FORCE = 1,
52         CHUNK_ALLOC_LIMITED = 2,
53 };
54
55 /*
56  * Control how reservations are dealt with.
57  *
58  * RESERVE_FREE - freeing a reservation.
59  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
60  *   ENOSPC accounting
61  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
62  *   bytes_may_use as the ENOSPC accounting is done elsewhere
63  */
64 enum {
65         RESERVE_FREE = 0,
66         RESERVE_ALLOC = 1,
67         RESERVE_ALLOC_NO_ACCOUNT = 2,
68 };
69
70 static int update_block_group(struct btrfs_trans_handle *trans,
71                               struct btrfs_root *root,
72                               u64 bytenr, u64 num_bytes, int alloc);
73 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
74                                 struct btrfs_root *root,
75                                 u64 bytenr, u64 num_bytes, u64 parent,
76                                 u64 root_objectid, u64 owner_objectid,
77                                 u64 owner_offset, int refs_to_drop,
78                                 struct btrfs_delayed_extent_op *extra_op);
79 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
80                                     struct extent_buffer *leaf,
81                                     struct btrfs_extent_item *ei);
82 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
83                                       struct btrfs_root *root,
84                                       u64 parent, u64 root_objectid,
85                                       u64 flags, u64 owner, u64 offset,
86                                       struct btrfs_key *ins, int ref_mod);
87 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
88                                      struct btrfs_root *root,
89                                      u64 parent, u64 root_objectid,
90                                      u64 flags, struct btrfs_disk_key *key,
91                                      int level, struct btrfs_key *ins);
92 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
93                           struct btrfs_root *extent_root, u64 alloc_bytes,
94                           u64 flags, int force);
95 static int find_next_key(struct btrfs_path *path, int level,
96                          struct btrfs_key *key);
97 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
98                             int dump_block_groups);
99 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
100                                        u64 num_bytes, int reserve);
101
102 static noinline int
103 block_group_cache_done(struct btrfs_block_group_cache *cache)
104 {
105         smp_mb();
106         return cache->cached == BTRFS_CACHE_FINISHED;
107 }
108
109 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
110 {
111         return (cache->flags & bits) == bits;
112 }
113
114 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
115 {
116         atomic_inc(&cache->count);
117 }
118
119 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
120 {
121         if (atomic_dec_and_test(&cache->count)) {
122                 WARN_ON(cache->pinned > 0);
123                 WARN_ON(cache->reserved > 0);
124                 kfree(cache->free_space_ctl);
125                 kfree(cache);
126         }
127 }
128
129 /*
130  * this adds the block group to the fs_info rb tree for the block group
131  * cache
132  */
133 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
134                                 struct btrfs_block_group_cache *block_group)
135 {
136         struct rb_node **p;
137         struct rb_node *parent = NULL;
138         struct btrfs_block_group_cache *cache;
139
140         spin_lock(&info->block_group_cache_lock);
141         p = &info->block_group_cache_tree.rb_node;
142
143         while (*p) {
144                 parent = *p;
145                 cache = rb_entry(parent, struct btrfs_block_group_cache,
146                                  cache_node);
147                 if (block_group->key.objectid < cache->key.objectid) {
148                         p = &(*p)->rb_left;
149                 } else if (block_group->key.objectid > cache->key.objectid) {
150                         p = &(*p)->rb_right;
151                 } else {
152                         spin_unlock(&info->block_group_cache_lock);
153                         return -EEXIST;
154                 }
155         }
156
157         rb_link_node(&block_group->cache_node, parent, p);
158         rb_insert_color(&block_group->cache_node,
159                         &info->block_group_cache_tree);
160         spin_unlock(&info->block_group_cache_lock);
161
162         return 0;
163 }
164
165 /*
166  * This will return the block group at or after bytenr if contains is 0, else
167  * it will return the block group that contains the bytenr
168  */
169 static struct btrfs_block_group_cache *
170 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
171                               int contains)
172 {
173         struct btrfs_block_group_cache *cache, *ret = NULL;
174         struct rb_node *n;
175         u64 end, start;
176
177         spin_lock(&info->block_group_cache_lock);
178         n = info->block_group_cache_tree.rb_node;
179
180         while (n) {
181                 cache = rb_entry(n, struct btrfs_block_group_cache,
182                                  cache_node);
183                 end = cache->key.objectid + cache->key.offset - 1;
184                 start = cache->key.objectid;
185
186                 if (bytenr < start) {
187                         if (!contains && (!ret || start < ret->key.objectid))
188                                 ret = cache;
189                         n = n->rb_left;
190                 } else if (bytenr > start) {
191                         if (contains && bytenr <= end) {
192                                 ret = cache;
193                                 break;
194                         }
195                         n = n->rb_right;
196                 } else {
197                         ret = cache;
198                         break;
199                 }
200         }
201         if (ret)
202                 btrfs_get_block_group(ret);
203         spin_unlock(&info->block_group_cache_lock);
204
205         return ret;
206 }
207
208 static int add_excluded_extent(struct btrfs_root *root,
209                                u64 start, u64 num_bytes)
210 {
211         u64 end = start + num_bytes - 1;
212         set_extent_bits(&root->fs_info->freed_extents[0],
213                         start, end, EXTENT_UPTODATE, GFP_NOFS);
214         set_extent_bits(&root->fs_info->freed_extents[1],
215                         start, end, EXTENT_UPTODATE, GFP_NOFS);
216         return 0;
217 }
218
219 static void free_excluded_extents(struct btrfs_root *root,
220                                   struct btrfs_block_group_cache *cache)
221 {
222         u64 start, end;
223
224         start = cache->key.objectid;
225         end = start + cache->key.offset - 1;
226
227         clear_extent_bits(&root->fs_info->freed_extents[0],
228                           start, end, EXTENT_UPTODATE, GFP_NOFS);
229         clear_extent_bits(&root->fs_info->freed_extents[1],
230                           start, end, EXTENT_UPTODATE, GFP_NOFS);
231 }
232
233 static int exclude_super_stripes(struct btrfs_root *root,
234                                  struct btrfs_block_group_cache *cache)
235 {
236         u64 bytenr;
237         u64 *logical;
238         int stripe_len;
239         int i, nr, ret;
240
241         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
242                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
243                 cache->bytes_super += stripe_len;
244                 ret = add_excluded_extent(root, cache->key.objectid,
245                                           stripe_len);
246                 BUG_ON(ret);
247         }
248
249         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
250                 bytenr = btrfs_sb_offset(i);
251                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
252                                        cache->key.objectid, bytenr,
253                                        0, &logical, &nr, &stripe_len);
254                 BUG_ON(ret);
255
256                 while (nr--) {
257                         cache->bytes_super += stripe_len;
258                         ret = add_excluded_extent(root, logical[nr],
259                                                   stripe_len);
260                         BUG_ON(ret);
261                 }
262
263                 kfree(logical);
264         }
265         return 0;
266 }
267
268 static struct btrfs_caching_control *
269 get_caching_control(struct btrfs_block_group_cache *cache)
270 {
271         struct btrfs_caching_control *ctl;
272
273         spin_lock(&cache->lock);
274         if (cache->cached != BTRFS_CACHE_STARTED) {
275                 spin_unlock(&cache->lock);
276                 return NULL;
277         }
278
279         /* We're loading it the fast way, so we don't have a caching_ctl. */
280         if (!cache->caching_ctl) {
281                 spin_unlock(&cache->lock);
282                 return NULL;
283         }
284
285         ctl = cache->caching_ctl;
286         atomic_inc(&ctl->count);
287         spin_unlock(&cache->lock);
288         return ctl;
289 }
290
291 static void put_caching_control(struct btrfs_caching_control *ctl)
292 {
293         if (atomic_dec_and_test(&ctl->count))
294                 kfree(ctl);
295 }
296
297 /*
298  * this is only called by cache_block_group, since we could have freed extents
299  * we need to check the pinned_extents for any extents that can't be used yet
300  * since their free space will be released as soon as the transaction commits.
301  */
302 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
303                               struct btrfs_fs_info *info, u64 start, u64 end)
304 {
305         u64 extent_start, extent_end, size, total_added = 0;
306         int ret;
307
308         while (start < end) {
309                 ret = find_first_extent_bit(info->pinned_extents, start,
310                                             &extent_start, &extent_end,
311                                             EXTENT_DIRTY | EXTENT_UPTODATE);
312                 if (ret)
313                         break;
314
315                 if (extent_start <= start) {
316                         start = extent_end + 1;
317                 } else if (extent_start > start && extent_start < end) {
318                         size = extent_start - start;
319                         total_added += size;
320                         ret = btrfs_add_free_space(block_group, start,
321                                                    size);
322                         BUG_ON(ret);
323                         start = extent_end + 1;
324                 } else {
325                         break;
326                 }
327         }
328
329         if (start < end) {
330                 size = end - start;
331                 total_added += size;
332                 ret = btrfs_add_free_space(block_group, start, size);
333                 BUG_ON(ret);
334         }
335
336         return total_added;
337 }
338
339 static noinline void caching_thread(struct btrfs_work *work)
340 {
341         struct btrfs_block_group_cache *block_group;
342         struct btrfs_fs_info *fs_info;
343         struct btrfs_caching_control *caching_ctl;
344         struct btrfs_root *extent_root;
345         struct btrfs_path *path;
346         struct extent_buffer *leaf;
347         struct btrfs_key key;
348         u64 total_found = 0;
349         u64 last = 0;
350         u32 nritems;
351         int ret = 0;
352
353         caching_ctl = container_of(work, struct btrfs_caching_control, work);
354         block_group = caching_ctl->block_group;
355         fs_info = block_group->fs_info;
356         extent_root = fs_info->extent_root;
357
358         path = btrfs_alloc_path();
359         if (!path)
360                 goto out;
361
362         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
363
364         /*
365          * We don't want to deadlock with somebody trying to allocate a new
366          * extent for the extent root while also trying to search the extent
367          * root to add free space.  So we skip locking and search the commit
368          * root, since its read-only
369          */
370         path->skip_locking = 1;
371         path->search_commit_root = 1;
372         path->reada = 1;
373
374         key.objectid = last;
375         key.offset = 0;
376         key.type = BTRFS_EXTENT_ITEM_KEY;
377 again:
378         mutex_lock(&caching_ctl->mutex);
379         /* need to make sure the commit_root doesn't disappear */
380         down_read(&fs_info->extent_commit_sem);
381
382         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
383         if (ret < 0)
384                 goto err;
385
386         leaf = path->nodes[0];
387         nritems = btrfs_header_nritems(leaf);
388
389         while (1) {
390                 if (btrfs_fs_closing(fs_info) > 1) {
391                         last = (u64)-1;
392                         break;
393                 }
394
395                 if (path->slots[0] < nritems) {
396                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
397                 } else {
398                         ret = find_next_key(path, 0, &key);
399                         if (ret)
400                                 break;
401
402                         if (need_resched() ||
403                             btrfs_next_leaf(extent_root, path)) {
404                                 caching_ctl->progress = last;
405                                 btrfs_release_path(path);
406                                 up_read(&fs_info->extent_commit_sem);
407                                 mutex_unlock(&caching_ctl->mutex);
408                                 cond_resched();
409                                 goto again;
410                         }
411                         leaf = path->nodes[0];
412                         nritems = btrfs_header_nritems(leaf);
413                         continue;
414                 }
415
416                 if (key.objectid < block_group->key.objectid) {
417                         path->slots[0]++;
418                         continue;
419                 }
420
421                 if (key.objectid >= block_group->key.objectid +
422                     block_group->key.offset)
423                         break;
424
425                 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
426                         total_found += add_new_free_space(block_group,
427                                                           fs_info, last,
428                                                           key.objectid);
429                         last = key.objectid + key.offset;
430
431                         if (total_found > (1024 * 1024 * 2)) {
432                                 total_found = 0;
433                                 wake_up(&caching_ctl->wait);
434                         }
435                 }
436                 path->slots[0]++;
437         }
438         ret = 0;
439
440         total_found += add_new_free_space(block_group, fs_info, last,
441                                           block_group->key.objectid +
442                                           block_group->key.offset);
443         caching_ctl->progress = (u64)-1;
444
445         spin_lock(&block_group->lock);
446         block_group->caching_ctl = NULL;
447         block_group->cached = BTRFS_CACHE_FINISHED;
448         spin_unlock(&block_group->lock);
449
450 err:
451         btrfs_free_path(path);
452         up_read(&fs_info->extent_commit_sem);
453
454         free_excluded_extents(extent_root, block_group);
455
456         mutex_unlock(&caching_ctl->mutex);
457 out:
458         wake_up(&caching_ctl->wait);
459
460         put_caching_control(caching_ctl);
461         btrfs_put_block_group(block_group);
462 }
463
464 static int cache_block_group(struct btrfs_block_group_cache *cache,
465                              struct btrfs_trans_handle *trans,
466                              struct btrfs_root *root,
467                              int load_cache_only)
468 {
469         struct btrfs_fs_info *fs_info = cache->fs_info;
470         struct btrfs_caching_control *caching_ctl;
471         int ret = 0;
472
473         smp_mb();
474         if (cache->cached != BTRFS_CACHE_NO)
475                 return 0;
476
477         /*
478          * We can't do the read from on-disk cache during a commit since we need
479          * to have the normal tree locking.  Also if we are currently trying to
480          * allocate blocks for the tree root we can't do the fast caching since
481          * we likely hold important locks.
482          */
483         if (trans && (!trans->transaction->in_commit) &&
484             (root && root != root->fs_info->tree_root) &&
485             btrfs_test_opt(root, SPACE_CACHE)) {
486                 spin_lock(&cache->lock);
487                 if (cache->cached != BTRFS_CACHE_NO) {
488                         spin_unlock(&cache->lock);
489                         return 0;
490                 }
491                 cache->cached = BTRFS_CACHE_STARTED;
492                 spin_unlock(&cache->lock);
493
494                 ret = load_free_space_cache(fs_info, cache);
495
496                 spin_lock(&cache->lock);
497                 if (ret == 1) {
498                         cache->cached = BTRFS_CACHE_FINISHED;
499                         cache->last_byte_to_unpin = (u64)-1;
500                 } else {
501                         cache->cached = BTRFS_CACHE_NO;
502                 }
503                 spin_unlock(&cache->lock);
504                 if (ret == 1) {
505                         free_excluded_extents(fs_info->extent_root, cache);
506                         return 0;
507                 }
508         }
509
510         if (load_cache_only)
511                 return 0;
512
513         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
514         BUG_ON(!caching_ctl);
515
516         INIT_LIST_HEAD(&caching_ctl->list);
517         mutex_init(&caching_ctl->mutex);
518         init_waitqueue_head(&caching_ctl->wait);
519         caching_ctl->block_group = cache;
520         caching_ctl->progress = cache->key.objectid;
521         /* one for caching kthread, one for caching block group list */
522         atomic_set(&caching_ctl->count, 2);
523         caching_ctl->work.func = caching_thread;
524
525         spin_lock(&cache->lock);
526         if (cache->cached != BTRFS_CACHE_NO) {
527                 spin_unlock(&cache->lock);
528                 kfree(caching_ctl);
529                 return 0;
530         }
531         cache->caching_ctl = caching_ctl;
532         cache->cached = BTRFS_CACHE_STARTED;
533         spin_unlock(&cache->lock);
534
535         down_write(&fs_info->extent_commit_sem);
536         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
537         up_write(&fs_info->extent_commit_sem);
538
539         btrfs_get_block_group(cache);
540
541         btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
542
543         return ret;
544 }
545
546 /*
547  * return the block group that starts at or after bytenr
548  */
549 static struct btrfs_block_group_cache *
550 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
551 {
552         struct btrfs_block_group_cache *cache;
553
554         cache = block_group_cache_tree_search(info, bytenr, 0);
555
556         return cache;
557 }
558
559 /*
560  * return the block group that contains the given bytenr
561  */
562 struct btrfs_block_group_cache *btrfs_lookup_block_group(
563                                                  struct btrfs_fs_info *info,
564                                                  u64 bytenr)
565 {
566         struct btrfs_block_group_cache *cache;
567
568         cache = block_group_cache_tree_search(info, bytenr, 1);
569
570         return cache;
571 }
572
573 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
574                                                   u64 flags)
575 {
576         struct list_head *head = &info->space_info;
577         struct btrfs_space_info *found;
578
579         flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
580                  BTRFS_BLOCK_GROUP_METADATA;
581
582         rcu_read_lock();
583         list_for_each_entry_rcu(found, head, list) {
584                 if (found->flags & flags) {
585                         rcu_read_unlock();
586                         return found;
587                 }
588         }
589         rcu_read_unlock();
590         return NULL;
591 }
592
593 /*
594  * after adding space to the filesystem, we need to clear the full flags
595  * on all the space infos.
596  */
597 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
598 {
599         struct list_head *head = &info->space_info;
600         struct btrfs_space_info *found;
601
602         rcu_read_lock();
603         list_for_each_entry_rcu(found, head, list)
604                 found->full = 0;
605         rcu_read_unlock();
606 }
607
608 static u64 div_factor(u64 num, int factor)
609 {
610         if (factor == 10)
611                 return num;
612         num *= factor;
613         do_div(num, 10);
614         return num;
615 }
616
617 static u64 div_factor_fine(u64 num, int factor)
618 {
619         if (factor == 100)
620                 return num;
621         num *= factor;
622         do_div(num, 100);
623         return num;
624 }
625
626 u64 btrfs_find_block_group(struct btrfs_root *root,
627                            u64 search_start, u64 search_hint, int owner)
628 {
629         struct btrfs_block_group_cache *cache;
630         u64 used;
631         u64 last = max(search_hint, search_start);
632         u64 group_start = 0;
633         int full_search = 0;
634         int factor = 9;
635         int wrapped = 0;
636 again:
637         while (1) {
638                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
639                 if (!cache)
640                         break;
641
642                 spin_lock(&cache->lock);
643                 last = cache->key.objectid + cache->key.offset;
644                 used = btrfs_block_group_used(&cache->item);
645
646                 if ((full_search || !cache->ro) &&
647                     block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
648                         if (used + cache->pinned + cache->reserved <
649                             div_factor(cache->key.offset, factor)) {
650                                 group_start = cache->key.objectid;
651                                 spin_unlock(&cache->lock);
652                                 btrfs_put_block_group(cache);
653                                 goto found;
654                         }
655                 }
656                 spin_unlock(&cache->lock);
657                 btrfs_put_block_group(cache);
658                 cond_resched();
659         }
660         if (!wrapped) {
661                 last = search_start;
662                 wrapped = 1;
663                 goto again;
664         }
665         if (!full_search && factor < 10) {
666                 last = search_start;
667                 full_search = 1;
668                 factor = 10;
669                 goto again;
670         }
671 found:
672         return group_start;
673 }
674
675 /* simple helper to search for an existing extent at a given offset */
676 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
677 {
678         int ret;
679         struct btrfs_key key;
680         struct btrfs_path *path;
681
682         path = btrfs_alloc_path();
683         if (!path)
684                 return -ENOMEM;
685
686         key.objectid = start;
687         key.offset = len;
688         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
689         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
690                                 0, 0);
691         btrfs_free_path(path);
692         return ret;
693 }
694
695 /*
696  * helper function to lookup reference count and flags of extent.
697  *
698  * the head node for delayed ref is used to store the sum of all the
699  * reference count modifications queued up in the rbtree. the head
700  * node may also store the extent flags to set. This way you can check
701  * to see what the reference count and extent flags would be if all of
702  * the delayed refs are not processed.
703  */
704 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
705                              struct btrfs_root *root, u64 bytenr,
706                              u64 num_bytes, u64 *refs, u64 *flags)
707 {
708         struct btrfs_delayed_ref_head *head;
709         struct btrfs_delayed_ref_root *delayed_refs;
710         struct btrfs_path *path;
711         struct btrfs_extent_item *ei;
712         struct extent_buffer *leaf;
713         struct btrfs_key key;
714         u32 item_size;
715         u64 num_refs;
716         u64 extent_flags;
717         int ret;
718
719         path = btrfs_alloc_path();
720         if (!path)
721                 return -ENOMEM;
722
723         key.objectid = bytenr;
724         key.type = BTRFS_EXTENT_ITEM_KEY;
725         key.offset = num_bytes;
726         if (!trans) {
727                 path->skip_locking = 1;
728                 path->search_commit_root = 1;
729         }
730 again:
731         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
732                                 &key, path, 0, 0);
733         if (ret < 0)
734                 goto out_free;
735
736         if (ret == 0) {
737                 leaf = path->nodes[0];
738                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
739                 if (item_size >= sizeof(*ei)) {
740                         ei = btrfs_item_ptr(leaf, path->slots[0],
741                                             struct btrfs_extent_item);
742                         num_refs = btrfs_extent_refs(leaf, ei);
743                         extent_flags = btrfs_extent_flags(leaf, ei);
744                 } else {
745 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
746                         struct btrfs_extent_item_v0 *ei0;
747                         BUG_ON(item_size != sizeof(*ei0));
748                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
749                                              struct btrfs_extent_item_v0);
750                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
751                         /* FIXME: this isn't correct for data */
752                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
753 #else
754                         BUG();
755 #endif
756                 }
757                 BUG_ON(num_refs == 0);
758         } else {
759                 num_refs = 0;
760                 extent_flags = 0;
761                 ret = 0;
762         }
763
764         if (!trans)
765                 goto out;
766
767         delayed_refs = &trans->transaction->delayed_refs;
768         spin_lock(&delayed_refs->lock);
769         head = btrfs_find_delayed_ref_head(trans, bytenr);
770         if (head) {
771                 if (!mutex_trylock(&head->mutex)) {
772                         atomic_inc(&head->node.refs);
773                         spin_unlock(&delayed_refs->lock);
774
775                         btrfs_release_path(path);
776
777                         /*
778                          * Mutex was contended, block until it's released and try
779                          * again
780                          */
781                         mutex_lock(&head->mutex);
782                         mutex_unlock(&head->mutex);
783                         btrfs_put_delayed_ref(&head->node);
784                         goto again;
785                 }
786                 if (head->extent_op && head->extent_op->update_flags)
787                         extent_flags |= head->extent_op->flags_to_set;
788                 else
789                         BUG_ON(num_refs == 0);
790
791                 num_refs += head->node.ref_mod;
792                 mutex_unlock(&head->mutex);
793         }
794         spin_unlock(&delayed_refs->lock);
795 out:
796         WARN_ON(num_refs == 0);
797         if (refs)
798                 *refs = num_refs;
799         if (flags)
800                 *flags = extent_flags;
801 out_free:
802         btrfs_free_path(path);
803         return ret;
804 }
805
806 /*
807  * Back reference rules.  Back refs have three main goals:
808  *
809  * 1) differentiate between all holders of references to an extent so that
810  *    when a reference is dropped we can make sure it was a valid reference
811  *    before freeing the extent.
812  *
813  * 2) Provide enough information to quickly find the holders of an extent
814  *    if we notice a given block is corrupted or bad.
815  *
816  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
817  *    maintenance.  This is actually the same as #2, but with a slightly
818  *    different use case.
819  *
820  * There are two kinds of back refs. The implicit back refs is optimized
821  * for pointers in non-shared tree blocks. For a given pointer in a block,
822  * back refs of this kind provide information about the block's owner tree
823  * and the pointer's key. These information allow us to find the block by
824  * b-tree searching. The full back refs is for pointers in tree blocks not
825  * referenced by their owner trees. The location of tree block is recorded
826  * in the back refs. Actually the full back refs is generic, and can be
827  * used in all cases the implicit back refs is used. The major shortcoming
828  * of the full back refs is its overhead. Every time a tree block gets
829  * COWed, we have to update back refs entry for all pointers in it.
830  *
831  * For a newly allocated tree block, we use implicit back refs for
832  * pointers in it. This means most tree related operations only involve
833  * implicit back refs. For a tree block created in old transaction, the
834  * only way to drop a reference to it is COW it. So we can detect the
835  * event that tree block loses its owner tree's reference and do the
836  * back refs conversion.
837  *
838  * When a tree block is COW'd through a tree, there are four cases:
839  *
840  * The reference count of the block is one and the tree is the block's
841  * owner tree. Nothing to do in this case.
842  *
843  * The reference count of the block is one and the tree is not the
844  * block's owner tree. In this case, full back refs is used for pointers
845  * in the block. Remove these full back refs, add implicit back refs for
846  * every pointers in the new block.
847  *
848  * The reference count of the block is greater than one and the tree is
849  * the block's owner tree. In this case, implicit back refs is used for
850  * pointers in the block. Add full back refs for every pointers in the
851  * block, increase lower level extents' reference counts. The original
852  * implicit back refs are entailed to the new block.
853  *
854  * The reference count of the block is greater than one and the tree is
855  * not the block's owner tree. Add implicit back refs for every pointer in
856  * the new block, increase lower level extents' reference count.
857  *
858  * Back Reference Key composing:
859  *
860  * The key objectid corresponds to the first byte in the extent,
861  * The key type is used to differentiate between types of back refs.
862  * There are different meanings of the key offset for different types
863  * of back refs.
864  *
865  * File extents can be referenced by:
866  *
867  * - multiple snapshots, subvolumes, or different generations in one subvol
868  * - different files inside a single subvolume
869  * - different offsets inside a file (bookend extents in file.c)
870  *
871  * The extent ref structure for the implicit back refs has fields for:
872  *
873  * - Objectid of the subvolume root
874  * - objectid of the file holding the reference
875  * - original offset in the file
876  * - how many bookend extents
877  *
878  * The key offset for the implicit back refs is hash of the first
879  * three fields.
880  *
881  * The extent ref structure for the full back refs has field for:
882  *
883  * - number of pointers in the tree leaf
884  *
885  * The key offset for the implicit back refs is the first byte of
886  * the tree leaf
887  *
888  * When a file extent is allocated, The implicit back refs is used.
889  * the fields are filled in:
890  *
891  *     (root_key.objectid, inode objectid, offset in file, 1)
892  *
893  * When a file extent is removed file truncation, we find the
894  * corresponding implicit back refs and check the following fields:
895  *
896  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
897  *
898  * Btree extents can be referenced by:
899  *
900  * - Different subvolumes
901  *
902  * Both the implicit back refs and the full back refs for tree blocks
903  * only consist of key. The key offset for the implicit back refs is
904  * objectid of block's owner tree. The key offset for the full back refs
905  * is the first byte of parent block.
906  *
907  * When implicit back refs is used, information about the lowest key and
908  * level of the tree block are required. These information are stored in
909  * tree block info structure.
910  */
911
912 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
913 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
914                                   struct btrfs_root *root,
915                                   struct btrfs_path *path,
916                                   u64 owner, u32 extra_size)
917 {
918         struct btrfs_extent_item *item;
919         struct btrfs_extent_item_v0 *ei0;
920         struct btrfs_extent_ref_v0 *ref0;
921         struct btrfs_tree_block_info *bi;
922         struct extent_buffer *leaf;
923         struct btrfs_key key;
924         struct btrfs_key found_key;
925         u32 new_size = sizeof(*item);
926         u64 refs;
927         int ret;
928
929         leaf = path->nodes[0];
930         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
931
932         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
933         ei0 = btrfs_item_ptr(leaf, path->slots[0],
934                              struct btrfs_extent_item_v0);
935         refs = btrfs_extent_refs_v0(leaf, ei0);
936
937         if (owner == (u64)-1) {
938                 while (1) {
939                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
940                                 ret = btrfs_next_leaf(root, path);
941                                 if (ret < 0)
942                                         return ret;
943                                 BUG_ON(ret > 0);
944                                 leaf = path->nodes[0];
945                         }
946                         btrfs_item_key_to_cpu(leaf, &found_key,
947                                               path->slots[0]);
948                         BUG_ON(key.objectid != found_key.objectid);
949                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
950                                 path->slots[0]++;
951                                 continue;
952                         }
953                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
954                                               struct btrfs_extent_ref_v0);
955                         owner = btrfs_ref_objectid_v0(leaf, ref0);
956                         break;
957                 }
958         }
959         btrfs_release_path(path);
960
961         if (owner < BTRFS_FIRST_FREE_OBJECTID)
962                 new_size += sizeof(*bi);
963
964         new_size -= sizeof(*ei0);
965         ret = btrfs_search_slot(trans, root, &key, path,
966                                 new_size + extra_size, 1);
967         if (ret < 0)
968                 return ret;
969         BUG_ON(ret);
970
971         ret = btrfs_extend_item(trans, root, path, new_size);
972
973         leaf = path->nodes[0];
974         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
975         btrfs_set_extent_refs(leaf, item, refs);
976         /* FIXME: get real generation */
977         btrfs_set_extent_generation(leaf, item, 0);
978         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
979                 btrfs_set_extent_flags(leaf, item,
980                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
981                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
982                 bi = (struct btrfs_tree_block_info *)(item + 1);
983                 /* FIXME: get first key of the block */
984                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
985                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
986         } else {
987                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
988         }
989         btrfs_mark_buffer_dirty(leaf);
990         return 0;
991 }
992 #endif
993
994 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
995 {
996         u32 high_crc = ~(u32)0;
997         u32 low_crc = ~(u32)0;
998         __le64 lenum;
999
1000         lenum = cpu_to_le64(root_objectid);
1001         high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
1002         lenum = cpu_to_le64(owner);
1003         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1004         lenum = cpu_to_le64(offset);
1005         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1006
1007         return ((u64)high_crc << 31) ^ (u64)low_crc;
1008 }
1009
1010 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1011                                      struct btrfs_extent_data_ref *ref)
1012 {
1013         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1014                                     btrfs_extent_data_ref_objectid(leaf, ref),
1015                                     btrfs_extent_data_ref_offset(leaf, ref));
1016 }
1017
1018 static int match_extent_data_ref(struct extent_buffer *leaf,
1019                                  struct btrfs_extent_data_ref *ref,
1020                                  u64 root_objectid, u64 owner, u64 offset)
1021 {
1022         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1023             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1024             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1025                 return 0;
1026         return 1;
1027 }
1028
1029 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1030                                            struct btrfs_root *root,
1031                                            struct btrfs_path *path,
1032                                            u64 bytenr, u64 parent,
1033                                            u64 root_objectid,
1034                                            u64 owner, u64 offset)
1035 {
1036         struct btrfs_key key;
1037         struct btrfs_extent_data_ref *ref;
1038         struct extent_buffer *leaf;
1039         u32 nritems;
1040         int ret;
1041         int recow;
1042         int err = -ENOENT;
1043
1044         key.objectid = bytenr;
1045         if (parent) {
1046                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1047                 key.offset = parent;
1048         } else {
1049                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1050                 key.offset = hash_extent_data_ref(root_objectid,
1051                                                   owner, offset);
1052         }
1053 again:
1054         recow = 0;
1055         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1056         if (ret < 0) {
1057                 err = ret;
1058                 goto fail;
1059         }
1060
1061         if (parent) {
1062                 if (!ret)
1063                         return 0;
1064 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1065                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1066                 btrfs_release_path(path);
1067                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1068                 if (ret < 0) {
1069                         err = ret;
1070                         goto fail;
1071                 }
1072                 if (!ret)
1073                         return 0;
1074 #endif
1075                 goto fail;
1076         }
1077
1078         leaf = path->nodes[0];
1079         nritems = btrfs_header_nritems(leaf);
1080         while (1) {
1081                 if (path->slots[0] >= nritems) {
1082                         ret = btrfs_next_leaf(root, path);
1083                         if (ret < 0)
1084                                 err = ret;
1085                         if (ret)
1086                                 goto fail;
1087
1088                         leaf = path->nodes[0];
1089                         nritems = btrfs_header_nritems(leaf);
1090                         recow = 1;
1091                 }
1092
1093                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1094                 if (key.objectid != bytenr ||
1095                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1096                         goto fail;
1097
1098                 ref = btrfs_item_ptr(leaf, path->slots[0],
1099                                      struct btrfs_extent_data_ref);
1100
1101                 if (match_extent_data_ref(leaf, ref, root_objectid,
1102                                           owner, offset)) {
1103                         if (recow) {
1104                                 btrfs_release_path(path);
1105                                 goto again;
1106                         }
1107                         err = 0;
1108                         break;
1109                 }
1110                 path->slots[0]++;
1111         }
1112 fail:
1113         return err;
1114 }
1115
1116 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1117                                            struct btrfs_root *root,
1118                                            struct btrfs_path *path,
1119                                            u64 bytenr, u64 parent,
1120                                            u64 root_objectid, u64 owner,
1121                                            u64 offset, int refs_to_add)
1122 {
1123         struct btrfs_key key;
1124         struct extent_buffer *leaf;
1125         u32 size;
1126         u32 num_refs;
1127         int ret;
1128
1129         key.objectid = bytenr;
1130         if (parent) {
1131                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1132                 key.offset = parent;
1133                 size = sizeof(struct btrfs_shared_data_ref);
1134         } else {
1135                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1136                 key.offset = hash_extent_data_ref(root_objectid,
1137                                                   owner, offset);
1138                 size = sizeof(struct btrfs_extent_data_ref);
1139         }
1140
1141         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1142         if (ret && ret != -EEXIST)
1143                 goto fail;
1144
1145         leaf = path->nodes[0];
1146         if (parent) {
1147                 struct btrfs_shared_data_ref *ref;
1148                 ref = btrfs_item_ptr(leaf, path->slots[0],
1149                                      struct btrfs_shared_data_ref);
1150                 if (ret == 0) {
1151                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1152                 } else {
1153                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1154                         num_refs += refs_to_add;
1155                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1156                 }
1157         } else {
1158                 struct btrfs_extent_data_ref *ref;
1159                 while (ret == -EEXIST) {
1160                         ref = btrfs_item_ptr(leaf, path->slots[0],
1161                                              struct btrfs_extent_data_ref);
1162                         if (match_extent_data_ref(leaf, ref, root_objectid,
1163                                                   owner, offset))
1164                                 break;
1165                         btrfs_release_path(path);
1166                         key.offset++;
1167                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1168                                                       size);
1169                         if (ret && ret != -EEXIST)
1170                                 goto fail;
1171
1172                         leaf = path->nodes[0];
1173                 }
1174                 ref = btrfs_item_ptr(leaf, path->slots[0],
1175                                      struct btrfs_extent_data_ref);
1176                 if (ret == 0) {
1177                         btrfs_set_extent_data_ref_root(leaf, ref,
1178                                                        root_objectid);
1179                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1180                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1181                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1182                 } else {
1183                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1184                         num_refs += refs_to_add;
1185                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1186                 }
1187         }
1188         btrfs_mark_buffer_dirty(leaf);
1189         ret = 0;
1190 fail:
1191         btrfs_release_path(path);
1192         return ret;
1193 }
1194
1195 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1196                                            struct btrfs_root *root,
1197                                            struct btrfs_path *path,
1198                                            int refs_to_drop)
1199 {
1200         struct btrfs_key key;
1201         struct btrfs_extent_data_ref *ref1 = NULL;
1202         struct btrfs_shared_data_ref *ref2 = NULL;
1203         struct extent_buffer *leaf;
1204         u32 num_refs = 0;
1205         int ret = 0;
1206
1207         leaf = path->nodes[0];
1208         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1209
1210         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1211                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1212                                       struct btrfs_extent_data_ref);
1213                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1214         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1215                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1216                                       struct btrfs_shared_data_ref);
1217                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1218 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1219         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1220                 struct btrfs_extent_ref_v0 *ref0;
1221                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1222                                       struct btrfs_extent_ref_v0);
1223                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1224 #endif
1225         } else {
1226                 BUG();
1227         }
1228
1229         BUG_ON(num_refs < refs_to_drop);
1230         num_refs -= refs_to_drop;
1231
1232         if (num_refs == 0) {
1233                 ret = btrfs_del_item(trans, root, path);
1234         } else {
1235                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1236                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1237                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1238                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1239 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1240                 else {
1241                         struct btrfs_extent_ref_v0 *ref0;
1242                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1243                                         struct btrfs_extent_ref_v0);
1244                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1245                 }
1246 #endif
1247                 btrfs_mark_buffer_dirty(leaf);
1248         }
1249         return ret;
1250 }
1251
1252 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1253                                           struct btrfs_path *path,
1254                                           struct btrfs_extent_inline_ref *iref)
1255 {
1256         struct btrfs_key key;
1257         struct extent_buffer *leaf;
1258         struct btrfs_extent_data_ref *ref1;
1259         struct btrfs_shared_data_ref *ref2;
1260         u32 num_refs = 0;
1261
1262         leaf = path->nodes[0];
1263         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1264         if (iref) {
1265                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1266                     BTRFS_EXTENT_DATA_REF_KEY) {
1267                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1268                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1269                 } else {
1270                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1271                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1272                 }
1273         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1274                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1275                                       struct btrfs_extent_data_ref);
1276                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1277         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1278                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1279                                       struct btrfs_shared_data_ref);
1280                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1281 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1282         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1283                 struct btrfs_extent_ref_v0 *ref0;
1284                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1285                                       struct btrfs_extent_ref_v0);
1286                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1287 #endif
1288         } else {
1289                 WARN_ON(1);
1290         }
1291         return num_refs;
1292 }
1293
1294 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1295                                           struct btrfs_root *root,
1296                                           struct btrfs_path *path,
1297                                           u64 bytenr, u64 parent,
1298                                           u64 root_objectid)
1299 {
1300         struct btrfs_key key;
1301         int ret;
1302
1303         key.objectid = bytenr;
1304         if (parent) {
1305                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1306                 key.offset = parent;
1307         } else {
1308                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1309                 key.offset = root_objectid;
1310         }
1311
1312         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1313         if (ret > 0)
1314                 ret = -ENOENT;
1315 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1316         if (ret == -ENOENT && parent) {
1317                 btrfs_release_path(path);
1318                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1319                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1320                 if (ret > 0)
1321                         ret = -ENOENT;
1322         }
1323 #endif
1324         return ret;
1325 }
1326
1327 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1328                                           struct btrfs_root *root,
1329                                           struct btrfs_path *path,
1330                                           u64 bytenr, u64 parent,
1331                                           u64 root_objectid)
1332 {
1333         struct btrfs_key key;
1334         int ret;
1335
1336         key.objectid = bytenr;
1337         if (parent) {
1338                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1339                 key.offset = parent;
1340         } else {
1341                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1342                 key.offset = root_objectid;
1343         }
1344
1345         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1346         btrfs_release_path(path);
1347         return ret;
1348 }
1349
1350 static inline int extent_ref_type(u64 parent, u64 owner)
1351 {
1352         int type;
1353         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1354                 if (parent > 0)
1355                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1356                 else
1357                         type = BTRFS_TREE_BLOCK_REF_KEY;
1358         } else {
1359                 if (parent > 0)
1360                         type = BTRFS_SHARED_DATA_REF_KEY;
1361                 else
1362                         type = BTRFS_EXTENT_DATA_REF_KEY;
1363         }
1364         return type;
1365 }
1366
1367 static int find_next_key(struct btrfs_path *path, int level,
1368                          struct btrfs_key *key)
1369
1370 {
1371         for (; level < BTRFS_MAX_LEVEL; level++) {
1372                 if (!path->nodes[level])
1373                         break;
1374                 if (path->slots[level] + 1 >=
1375                     btrfs_header_nritems(path->nodes[level]))
1376                         continue;
1377                 if (level == 0)
1378                         btrfs_item_key_to_cpu(path->nodes[level], key,
1379                                               path->slots[level] + 1);
1380                 else
1381                         btrfs_node_key_to_cpu(path->nodes[level], key,
1382                                               path->slots[level] + 1);
1383                 return 0;
1384         }
1385         return 1;
1386 }
1387
1388 /*
1389  * look for inline back ref. if back ref is found, *ref_ret is set
1390  * to the address of inline back ref, and 0 is returned.
1391  *
1392  * if back ref isn't found, *ref_ret is set to the address where it
1393  * should be inserted, and -ENOENT is returned.
1394  *
1395  * if insert is true and there are too many inline back refs, the path
1396  * points to the extent item, and -EAGAIN is returned.
1397  *
1398  * NOTE: inline back refs are ordered in the same way that back ref
1399  *       items in the tree are ordered.
1400  */
1401 static noinline_for_stack
1402 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1403                                  struct btrfs_root *root,
1404                                  struct btrfs_path *path,
1405                                  struct btrfs_extent_inline_ref **ref_ret,
1406                                  u64 bytenr, u64 num_bytes,
1407                                  u64 parent, u64 root_objectid,
1408                                  u64 owner, u64 offset, int insert)
1409 {
1410         struct btrfs_key key;
1411         struct extent_buffer *leaf;
1412         struct btrfs_extent_item *ei;
1413         struct btrfs_extent_inline_ref *iref;
1414         u64 flags;
1415         u64 item_size;
1416         unsigned long ptr;
1417         unsigned long end;
1418         int extra_size;
1419         int type;
1420         int want;
1421         int ret;
1422         int err = 0;
1423
1424         key.objectid = bytenr;
1425         key.type = BTRFS_EXTENT_ITEM_KEY;
1426         key.offset = num_bytes;
1427
1428         want = extent_ref_type(parent, owner);
1429         if (insert) {
1430                 extra_size = btrfs_extent_inline_ref_size(want);
1431                 path->keep_locks = 1;
1432         } else
1433                 extra_size = -1;
1434         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1435         if (ret < 0) {
1436                 err = ret;
1437                 goto out;
1438         }
1439         BUG_ON(ret);
1440
1441         leaf = path->nodes[0];
1442         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1443 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1444         if (item_size < sizeof(*ei)) {
1445                 if (!insert) {
1446                         err = -ENOENT;
1447                         goto out;
1448                 }
1449                 ret = convert_extent_item_v0(trans, root, path, owner,
1450                                              extra_size);
1451                 if (ret < 0) {
1452                         err = ret;
1453                         goto out;
1454                 }
1455                 leaf = path->nodes[0];
1456                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1457         }
1458 #endif
1459         BUG_ON(item_size < sizeof(*ei));
1460
1461         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1462         flags = btrfs_extent_flags(leaf, ei);
1463
1464         ptr = (unsigned long)(ei + 1);
1465         end = (unsigned long)ei + item_size;
1466
1467         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1468                 ptr += sizeof(struct btrfs_tree_block_info);
1469                 BUG_ON(ptr > end);
1470         } else {
1471                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1472         }
1473
1474         err = -ENOENT;
1475         while (1) {
1476                 if (ptr >= end) {
1477                         WARN_ON(ptr > end);
1478                         break;
1479                 }
1480                 iref = (struct btrfs_extent_inline_ref *)ptr;
1481                 type = btrfs_extent_inline_ref_type(leaf, iref);
1482                 if (want < type)
1483                         break;
1484                 if (want > type) {
1485                         ptr += btrfs_extent_inline_ref_size(type);
1486                         continue;
1487                 }
1488
1489                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1490                         struct btrfs_extent_data_ref *dref;
1491                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1492                         if (match_extent_data_ref(leaf, dref, root_objectid,
1493                                                   owner, offset)) {
1494                                 err = 0;
1495                                 break;
1496                         }
1497                         if (hash_extent_data_ref_item(leaf, dref) <
1498                             hash_extent_data_ref(root_objectid, owner, offset))
1499                                 break;
1500                 } else {
1501                         u64 ref_offset;
1502                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1503                         if (parent > 0) {
1504                                 if (parent == ref_offset) {
1505                                         err = 0;
1506                                         break;
1507                                 }
1508                                 if (ref_offset < parent)
1509                                         break;
1510                         } else {
1511                                 if (root_objectid == ref_offset) {
1512                                         err = 0;
1513                                         break;
1514                                 }
1515                                 if (ref_offset < root_objectid)
1516                                         break;
1517                         }
1518                 }
1519                 ptr += btrfs_extent_inline_ref_size(type);
1520         }
1521         if (err == -ENOENT && insert) {
1522                 if (item_size + extra_size >=
1523                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1524                         err = -EAGAIN;
1525                         goto out;
1526                 }
1527                 /*
1528                  * To add new inline back ref, we have to make sure
1529                  * there is no corresponding back ref item.
1530                  * For simplicity, we just do not add new inline back
1531                  * ref if there is any kind of item for this block
1532                  */
1533                 if (find_next_key(path, 0, &key) == 0 &&
1534                     key.objectid == bytenr &&
1535                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1536                         err = -EAGAIN;
1537                         goto out;
1538                 }
1539         }
1540         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1541 out:
1542         if (insert) {
1543                 path->keep_locks = 0;
1544                 btrfs_unlock_up_safe(path, 1);
1545         }
1546         return err;
1547 }
1548
1549 /*
1550  * helper to add new inline back ref
1551  */
1552 static noinline_for_stack
1553 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1554                                 struct btrfs_root *root,
1555                                 struct btrfs_path *path,
1556                                 struct btrfs_extent_inline_ref *iref,
1557                                 u64 parent, u64 root_objectid,
1558                                 u64 owner, u64 offset, int refs_to_add,
1559                                 struct btrfs_delayed_extent_op *extent_op)
1560 {
1561         struct extent_buffer *leaf;
1562         struct btrfs_extent_item *ei;
1563         unsigned long ptr;
1564         unsigned long end;
1565         unsigned long item_offset;
1566         u64 refs;
1567         int size;
1568         int type;
1569         int ret;
1570
1571         leaf = path->nodes[0];
1572         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1573         item_offset = (unsigned long)iref - (unsigned long)ei;
1574
1575         type = extent_ref_type(parent, owner);
1576         size = btrfs_extent_inline_ref_size(type);
1577
1578         ret = btrfs_extend_item(trans, root, path, size);
1579
1580         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1581         refs = btrfs_extent_refs(leaf, ei);
1582         refs += refs_to_add;
1583         btrfs_set_extent_refs(leaf, ei, refs);
1584         if (extent_op)
1585                 __run_delayed_extent_op(extent_op, leaf, ei);
1586
1587         ptr = (unsigned long)ei + item_offset;
1588         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1589         if (ptr < end - size)
1590                 memmove_extent_buffer(leaf, ptr + size, ptr,
1591                                       end - size - ptr);
1592
1593         iref = (struct btrfs_extent_inline_ref *)ptr;
1594         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1595         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1596                 struct btrfs_extent_data_ref *dref;
1597                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1598                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1599                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1600                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1601                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1602         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1603                 struct btrfs_shared_data_ref *sref;
1604                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1605                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1606                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1607         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1608                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1609         } else {
1610                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1611         }
1612         btrfs_mark_buffer_dirty(leaf);
1613         return 0;
1614 }
1615
1616 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1617                                  struct btrfs_root *root,
1618                                  struct btrfs_path *path,
1619                                  struct btrfs_extent_inline_ref **ref_ret,
1620                                  u64 bytenr, u64 num_bytes, u64 parent,
1621                                  u64 root_objectid, u64 owner, u64 offset)
1622 {
1623         int ret;
1624
1625         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1626                                            bytenr, num_bytes, parent,
1627                                            root_objectid, owner, offset, 0);
1628         if (ret != -ENOENT)
1629                 return ret;
1630
1631         btrfs_release_path(path);
1632         *ref_ret = NULL;
1633
1634         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1635                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1636                                             root_objectid);
1637         } else {
1638                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1639                                              root_objectid, owner, offset);
1640         }
1641         return ret;
1642 }
1643
1644 /*
1645  * helper to update/remove inline back ref
1646  */
1647 static noinline_for_stack
1648 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1649                                  struct btrfs_root *root,
1650                                  struct btrfs_path *path,
1651                                  struct btrfs_extent_inline_ref *iref,
1652                                  int refs_to_mod,
1653                                  struct btrfs_delayed_extent_op *extent_op)
1654 {
1655         struct extent_buffer *leaf;
1656         struct btrfs_extent_item *ei;
1657         struct btrfs_extent_data_ref *dref = NULL;
1658         struct btrfs_shared_data_ref *sref = NULL;
1659         unsigned long ptr;
1660         unsigned long end;
1661         u32 item_size;
1662         int size;
1663         int type;
1664         int ret;
1665         u64 refs;
1666
1667         leaf = path->nodes[0];
1668         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1669         refs = btrfs_extent_refs(leaf, ei);
1670         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1671         refs += refs_to_mod;
1672         btrfs_set_extent_refs(leaf, ei, refs);
1673         if (extent_op)
1674                 __run_delayed_extent_op(extent_op, leaf, ei);
1675
1676         type = btrfs_extent_inline_ref_type(leaf, iref);
1677
1678         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1679                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1680                 refs = btrfs_extent_data_ref_count(leaf, dref);
1681         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1682                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1683                 refs = btrfs_shared_data_ref_count(leaf, sref);
1684         } else {
1685                 refs = 1;
1686                 BUG_ON(refs_to_mod != -1);
1687         }
1688
1689         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1690         refs += refs_to_mod;
1691
1692         if (refs > 0) {
1693                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1694                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1695                 else
1696                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1697         } else {
1698                 size =  btrfs_extent_inline_ref_size(type);
1699                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1700                 ptr = (unsigned long)iref;
1701                 end = (unsigned long)ei + item_size;
1702                 if (ptr + size < end)
1703                         memmove_extent_buffer(leaf, ptr, ptr + size,
1704                                               end - ptr - size);
1705                 item_size -= size;
1706                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1707         }
1708         btrfs_mark_buffer_dirty(leaf);
1709         return 0;
1710 }
1711
1712 static noinline_for_stack
1713 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1714                                  struct btrfs_root *root,
1715                                  struct btrfs_path *path,
1716                                  u64 bytenr, u64 num_bytes, u64 parent,
1717                                  u64 root_objectid, u64 owner,
1718                                  u64 offset, int refs_to_add,
1719                                  struct btrfs_delayed_extent_op *extent_op)
1720 {
1721         struct btrfs_extent_inline_ref *iref;
1722         int ret;
1723
1724         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1725                                            bytenr, num_bytes, parent,
1726                                            root_objectid, owner, offset, 1);
1727         if (ret == 0) {
1728                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1729                 ret = update_inline_extent_backref(trans, root, path, iref,
1730                                                    refs_to_add, extent_op);
1731         } else if (ret == -ENOENT) {
1732                 ret = setup_inline_extent_backref(trans, root, path, iref,
1733                                                   parent, root_objectid,
1734                                                   owner, offset, refs_to_add,
1735                                                   extent_op);
1736         }
1737         return ret;
1738 }
1739
1740 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1741                                  struct btrfs_root *root,
1742                                  struct btrfs_path *path,
1743                                  u64 bytenr, u64 parent, u64 root_objectid,
1744                                  u64 owner, u64 offset, int refs_to_add)
1745 {
1746         int ret;
1747         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1748                 BUG_ON(refs_to_add != 1);
1749                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1750                                             parent, root_objectid);
1751         } else {
1752                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1753                                              parent, root_objectid,
1754                                              owner, offset, refs_to_add);
1755         }
1756         return ret;
1757 }
1758
1759 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1760                                  struct btrfs_root *root,
1761                                  struct btrfs_path *path,
1762                                  struct btrfs_extent_inline_ref *iref,
1763                                  int refs_to_drop, int is_data)
1764 {
1765         int ret;
1766
1767         BUG_ON(!is_data && refs_to_drop != 1);
1768         if (iref) {
1769                 ret = update_inline_extent_backref(trans, root, path, iref,
1770                                                    -refs_to_drop, NULL);
1771         } else if (is_data) {
1772                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1773         } else {
1774                 ret = btrfs_del_item(trans, root, path);
1775         }
1776         return ret;
1777 }
1778
1779 static int btrfs_issue_discard(struct block_device *bdev,
1780                                 u64 start, u64 len)
1781 {
1782         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1783 }
1784
1785 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1786                                 u64 num_bytes, u64 *actual_bytes)
1787 {
1788         int ret;
1789         u64 discarded_bytes = 0;
1790         struct btrfs_multi_bio *multi = NULL;
1791
1792
1793         /* Tell the block device(s) that the sectors can be discarded */
1794         ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1795                               bytenr, &num_bytes, &multi, 0);
1796         if (!ret) {
1797                 struct btrfs_bio_stripe *stripe = multi->stripes;
1798                 int i;
1799
1800
1801                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1802                         if (!stripe->dev->can_discard)
1803                                 continue;
1804
1805                         ret = btrfs_issue_discard(stripe->dev->bdev,
1806                                                   stripe->physical,
1807                                                   stripe->length);
1808                         if (!ret)
1809                                 discarded_bytes += stripe->length;
1810                         else if (ret != -EOPNOTSUPP)
1811                                 break;
1812
1813                         /*
1814                          * Just in case we get back EOPNOTSUPP for some reason,
1815                          * just ignore the return value so we don't screw up
1816                          * people calling discard_extent.
1817                          */
1818                         ret = 0;
1819                 }
1820                 kfree(multi);
1821         }
1822
1823         if (actual_bytes)
1824                 *actual_bytes = discarded_bytes;
1825
1826
1827         return ret;
1828 }
1829
1830 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1831                          struct btrfs_root *root,
1832                          u64 bytenr, u64 num_bytes, u64 parent,
1833                          u64 root_objectid, u64 owner, u64 offset)
1834 {
1835         int ret;
1836         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1837                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1838
1839         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1840                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1841                                         parent, root_objectid, (int)owner,
1842                                         BTRFS_ADD_DELAYED_REF, NULL);
1843         } else {
1844                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1845                                         parent, root_objectid, owner, offset,
1846                                         BTRFS_ADD_DELAYED_REF, NULL);
1847         }
1848         return ret;
1849 }
1850
1851 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1852                                   struct btrfs_root *root,
1853                                   u64 bytenr, u64 num_bytes,
1854                                   u64 parent, u64 root_objectid,
1855                                   u64 owner, u64 offset, int refs_to_add,
1856                                   struct btrfs_delayed_extent_op *extent_op)
1857 {
1858         struct btrfs_path *path;
1859         struct extent_buffer *leaf;
1860         struct btrfs_extent_item *item;
1861         u64 refs;
1862         int ret;
1863         int err = 0;
1864
1865         path = btrfs_alloc_path();
1866         if (!path)
1867                 return -ENOMEM;
1868
1869         path->reada = 1;
1870         path->leave_spinning = 1;
1871         /* this will setup the path even if it fails to insert the back ref */
1872         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1873                                            path, bytenr, num_bytes, parent,
1874                                            root_objectid, owner, offset,
1875                                            refs_to_add, extent_op);
1876         if (ret == 0)
1877                 goto out;
1878
1879         if (ret != -EAGAIN) {
1880                 err = ret;
1881                 goto out;
1882         }
1883
1884         leaf = path->nodes[0];
1885         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1886         refs = btrfs_extent_refs(leaf, item);
1887         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1888         if (extent_op)
1889                 __run_delayed_extent_op(extent_op, leaf, item);
1890
1891         btrfs_mark_buffer_dirty(leaf);
1892         btrfs_release_path(path);
1893
1894         path->reada = 1;
1895         path->leave_spinning = 1;
1896
1897         /* now insert the actual backref */
1898         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1899                                     path, bytenr, parent, root_objectid,
1900                                     owner, offset, refs_to_add);
1901         BUG_ON(ret);
1902 out:
1903         btrfs_free_path(path);
1904         return err;
1905 }
1906
1907 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1908                                 struct btrfs_root *root,
1909                                 struct btrfs_delayed_ref_node *node,
1910                                 struct btrfs_delayed_extent_op *extent_op,
1911                                 int insert_reserved)
1912 {
1913         int ret = 0;
1914         struct btrfs_delayed_data_ref *ref;
1915         struct btrfs_key ins;
1916         u64 parent = 0;
1917         u64 ref_root = 0;
1918         u64 flags = 0;
1919
1920         ins.objectid = node->bytenr;
1921         ins.offset = node->num_bytes;
1922         ins.type = BTRFS_EXTENT_ITEM_KEY;
1923
1924         ref = btrfs_delayed_node_to_data_ref(node);
1925         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1926                 parent = ref->parent;
1927         else
1928                 ref_root = ref->root;
1929
1930         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1931                 if (extent_op) {
1932                         BUG_ON(extent_op->update_key);
1933                         flags |= extent_op->flags_to_set;
1934                 }
1935                 ret = alloc_reserved_file_extent(trans, root,
1936                                                  parent, ref_root, flags,
1937                                                  ref->objectid, ref->offset,
1938                                                  &ins, node->ref_mod);
1939         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1940                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1941                                              node->num_bytes, parent,
1942                                              ref_root, ref->objectid,
1943                                              ref->offset, node->ref_mod,
1944                                              extent_op);
1945         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1946                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1947                                           node->num_bytes, parent,
1948                                           ref_root, ref->objectid,
1949                                           ref->offset, node->ref_mod,
1950                                           extent_op);
1951         } else {
1952                 BUG();
1953         }
1954         return ret;
1955 }
1956
1957 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1958                                     struct extent_buffer *leaf,
1959                                     struct btrfs_extent_item *ei)
1960 {
1961         u64 flags = btrfs_extent_flags(leaf, ei);
1962         if (extent_op->update_flags) {
1963                 flags |= extent_op->flags_to_set;
1964                 btrfs_set_extent_flags(leaf, ei, flags);
1965         }
1966
1967         if (extent_op->update_key) {
1968                 struct btrfs_tree_block_info *bi;
1969                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1970                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1971                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1972         }
1973 }
1974
1975 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1976                                  struct btrfs_root *root,
1977                                  struct btrfs_delayed_ref_node *node,
1978                                  struct btrfs_delayed_extent_op *extent_op)
1979 {
1980         struct btrfs_key key;
1981         struct btrfs_path *path;
1982         struct btrfs_extent_item *ei;
1983         struct extent_buffer *leaf;
1984         u32 item_size;
1985         int ret;
1986         int err = 0;
1987
1988         path = btrfs_alloc_path();
1989         if (!path)
1990                 return -ENOMEM;
1991
1992         key.objectid = node->bytenr;
1993         key.type = BTRFS_EXTENT_ITEM_KEY;
1994         key.offset = node->num_bytes;
1995
1996         path->reada = 1;
1997         path->leave_spinning = 1;
1998         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1999                                 path, 0, 1);
2000         if (ret < 0) {
2001                 err = ret;
2002                 goto out;
2003         }
2004         if (ret > 0) {
2005                 err = -EIO;
2006                 goto out;
2007         }
2008
2009         leaf = path->nodes[0];
2010         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2011 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2012         if (item_size < sizeof(*ei)) {
2013                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2014                                              path, (u64)-1, 0);
2015                 if (ret < 0) {
2016                         err = ret;
2017                         goto out;
2018                 }
2019                 leaf = path->nodes[0];
2020                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2021         }
2022 #endif
2023         BUG_ON(item_size < sizeof(*ei));
2024         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2025         __run_delayed_extent_op(extent_op, leaf, ei);
2026
2027         btrfs_mark_buffer_dirty(leaf);
2028 out:
2029         btrfs_free_path(path);
2030         return err;
2031 }
2032
2033 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2034                                 struct btrfs_root *root,
2035                                 struct btrfs_delayed_ref_node *node,
2036                                 struct btrfs_delayed_extent_op *extent_op,
2037                                 int insert_reserved)
2038 {
2039         int ret = 0;
2040         struct btrfs_delayed_tree_ref *ref;
2041         struct btrfs_key ins;
2042         u64 parent = 0;
2043         u64 ref_root = 0;
2044
2045         ins.objectid = node->bytenr;
2046         ins.offset = node->num_bytes;
2047         ins.type = BTRFS_EXTENT_ITEM_KEY;
2048
2049         ref = btrfs_delayed_node_to_tree_ref(node);
2050         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2051                 parent = ref->parent;
2052         else
2053                 ref_root = ref->root;
2054
2055         BUG_ON(node->ref_mod != 1);
2056         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2057                 BUG_ON(!extent_op || !extent_op->update_flags ||
2058                        !extent_op->update_key);
2059                 ret = alloc_reserved_tree_block(trans, root,
2060                                                 parent, ref_root,
2061                                                 extent_op->flags_to_set,
2062                                                 &extent_op->key,
2063                                                 ref->level, &ins);
2064         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2065                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2066                                              node->num_bytes, parent, ref_root,
2067                                              ref->level, 0, 1, extent_op);
2068         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2069                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2070                                           node->num_bytes, parent, ref_root,
2071                                           ref->level, 0, 1, extent_op);
2072         } else {
2073                 BUG();
2074         }
2075         return ret;
2076 }
2077
2078 /* helper function to actually process a single delayed ref entry */
2079 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2080                                struct btrfs_root *root,
2081                                struct btrfs_delayed_ref_node *node,
2082                                struct btrfs_delayed_extent_op *extent_op,
2083                                int insert_reserved)
2084 {
2085         int ret;
2086         if (btrfs_delayed_ref_is_head(node)) {
2087                 struct btrfs_delayed_ref_head *head;
2088                 /*
2089                  * we've hit the end of the chain and we were supposed
2090                  * to insert this extent into the tree.  But, it got
2091                  * deleted before we ever needed to insert it, so all
2092                  * we have to do is clean up the accounting
2093                  */
2094                 BUG_ON(extent_op);
2095                 head = btrfs_delayed_node_to_head(node);
2096                 if (insert_reserved) {
2097                         btrfs_pin_extent(root, node->bytenr,
2098                                          node->num_bytes, 1);
2099                         if (head->is_data) {
2100                                 ret = btrfs_del_csums(trans, root,
2101                                                       node->bytenr,
2102                                                       node->num_bytes);
2103                                 BUG_ON(ret);
2104                         }
2105                 }
2106                 mutex_unlock(&head->mutex);
2107                 return 0;
2108         }
2109
2110         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2111             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2112                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2113                                            insert_reserved);
2114         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2115                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2116                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2117                                            insert_reserved);
2118         else
2119                 BUG();
2120         return ret;
2121 }
2122
2123 static noinline struct btrfs_delayed_ref_node *
2124 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2125 {
2126         struct rb_node *node;
2127         struct btrfs_delayed_ref_node *ref;
2128         int action = BTRFS_ADD_DELAYED_REF;
2129 again:
2130         /*
2131          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2132          * this prevents ref count from going down to zero when
2133          * there still are pending delayed ref.
2134          */
2135         node = rb_prev(&head->node.rb_node);
2136         while (1) {
2137                 if (!node)
2138                         break;
2139                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2140                                 rb_node);
2141                 if (ref->bytenr != head->node.bytenr)
2142                         break;
2143                 if (ref->action == action)
2144                         return ref;
2145                 node = rb_prev(node);
2146         }
2147         if (action == BTRFS_ADD_DELAYED_REF) {
2148                 action = BTRFS_DROP_DELAYED_REF;
2149                 goto again;
2150         }
2151         return NULL;
2152 }
2153
2154 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2155                                        struct btrfs_root *root,
2156                                        struct list_head *cluster)
2157 {
2158         struct btrfs_delayed_ref_root *delayed_refs;
2159         struct btrfs_delayed_ref_node *ref;
2160         struct btrfs_delayed_ref_head *locked_ref = NULL;
2161         struct btrfs_delayed_extent_op *extent_op;
2162         int ret;
2163         int count = 0;
2164         int must_insert_reserved = 0;
2165
2166         delayed_refs = &trans->transaction->delayed_refs;
2167         while (1) {
2168                 if (!locked_ref) {
2169                         /* pick a new head ref from the cluster list */
2170                         if (list_empty(cluster))
2171                                 break;
2172
2173                         locked_ref = list_entry(cluster->next,
2174                                      struct btrfs_delayed_ref_head, cluster);
2175
2176                         /* grab the lock that says we are going to process
2177                          * all the refs for this head */
2178                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2179
2180                         /*
2181                          * we may have dropped the spin lock to get the head
2182                          * mutex lock, and that might have given someone else
2183                          * time to free the head.  If that's true, it has been
2184                          * removed from our list and we can move on.
2185                          */
2186                         if (ret == -EAGAIN) {
2187                                 locked_ref = NULL;
2188                                 count++;
2189                                 continue;
2190                         }
2191                 }
2192
2193                 /*
2194                  * record the must insert reserved flag before we
2195                  * drop the spin lock.
2196                  */
2197                 must_insert_reserved = locked_ref->must_insert_reserved;
2198                 locked_ref->must_insert_reserved = 0;
2199
2200                 extent_op = locked_ref->extent_op;
2201                 locked_ref->extent_op = NULL;
2202
2203                 /*
2204                  * locked_ref is the head node, so we have to go one
2205                  * node back for any delayed ref updates
2206                  */
2207                 ref = select_delayed_ref(locked_ref);
2208                 if (!ref) {
2209                         /* All delayed refs have been processed, Go ahead
2210                          * and send the head node to run_one_delayed_ref,
2211                          * so that any accounting fixes can happen
2212                          */
2213                         ref = &locked_ref->node;
2214
2215                         if (extent_op && must_insert_reserved) {
2216                                 kfree(extent_op);
2217                                 extent_op = NULL;
2218                         }
2219
2220                         if (extent_op) {
2221                                 spin_unlock(&delayed_refs->lock);
2222
2223                                 ret = run_delayed_extent_op(trans, root,
2224                                                             ref, extent_op);
2225                                 BUG_ON(ret);
2226                                 kfree(extent_op);
2227
2228                                 cond_resched();
2229                                 spin_lock(&delayed_refs->lock);
2230                                 continue;
2231                         }
2232
2233                         list_del_init(&locked_ref->cluster);
2234                         locked_ref = NULL;
2235                 }
2236
2237                 ref->in_tree = 0;
2238                 rb_erase(&ref->rb_node, &delayed_refs->root);
2239                 delayed_refs->num_entries--;
2240
2241                 spin_unlock(&delayed_refs->lock);
2242
2243                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2244                                           must_insert_reserved);
2245                 BUG_ON(ret);
2246
2247                 btrfs_put_delayed_ref(ref);
2248                 kfree(extent_op);
2249                 count++;
2250
2251                 cond_resched();
2252                 spin_lock(&delayed_refs->lock);
2253         }
2254         return count;
2255 }
2256
2257 /*
2258  * this starts processing the delayed reference count updates and
2259  * extent insertions we have queued up so far.  count can be
2260  * 0, which means to process everything in the tree at the start
2261  * of the run (but not newly added entries), or it can be some target
2262  * number you'd like to process.
2263  */
2264 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2265                            struct btrfs_root *root, unsigned long count)
2266 {
2267         struct rb_node *node;
2268         struct btrfs_delayed_ref_root *delayed_refs;
2269         struct btrfs_delayed_ref_node *ref;
2270         struct list_head cluster;
2271         int ret;
2272         int run_all = count == (unsigned long)-1;
2273         int run_most = 0;
2274
2275         if (root == root->fs_info->extent_root)
2276                 root = root->fs_info->tree_root;
2277
2278         delayed_refs = &trans->transaction->delayed_refs;
2279         INIT_LIST_HEAD(&cluster);
2280 again:
2281         spin_lock(&delayed_refs->lock);
2282         if (count == 0) {
2283                 count = delayed_refs->num_entries * 2;
2284                 run_most = 1;
2285         }
2286         while (1) {
2287                 if (!(run_all || run_most) &&
2288                     delayed_refs->num_heads_ready < 64)
2289                         break;
2290
2291                 /*
2292                  * go find something we can process in the rbtree.  We start at
2293                  * the beginning of the tree, and then build a cluster
2294                  * of refs to process starting at the first one we are able to
2295                  * lock
2296                  */
2297                 ret = btrfs_find_ref_cluster(trans, &cluster,
2298                                              delayed_refs->run_delayed_start);
2299                 if (ret)
2300                         break;
2301
2302                 ret = run_clustered_refs(trans, root, &cluster);
2303                 BUG_ON(ret < 0);
2304
2305                 count -= min_t(unsigned long, ret, count);
2306
2307                 if (count == 0)
2308                         break;
2309         }
2310
2311         if (run_all) {
2312                 node = rb_first(&delayed_refs->root);
2313                 if (!node)
2314                         goto out;
2315                 count = (unsigned long)-1;
2316
2317                 while (node) {
2318                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2319                                        rb_node);
2320                         if (btrfs_delayed_ref_is_head(ref)) {
2321                                 struct btrfs_delayed_ref_head *head;
2322
2323                                 head = btrfs_delayed_node_to_head(ref);
2324                                 atomic_inc(&ref->refs);
2325
2326                                 spin_unlock(&delayed_refs->lock);
2327                                 /*
2328                                  * Mutex was contended, block until it's
2329                                  * released and try again
2330                                  */
2331                                 mutex_lock(&head->mutex);
2332                                 mutex_unlock(&head->mutex);
2333
2334                                 btrfs_put_delayed_ref(ref);
2335                                 cond_resched();
2336                                 goto again;
2337                         }
2338                         node = rb_next(node);
2339                 }
2340                 spin_unlock(&delayed_refs->lock);
2341                 schedule_timeout(1);
2342                 goto again;
2343         }
2344 out:
2345         spin_unlock(&delayed_refs->lock);
2346         return 0;
2347 }
2348
2349 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2350                                 struct btrfs_root *root,
2351                                 u64 bytenr, u64 num_bytes, u64 flags,
2352                                 int is_data)
2353 {
2354         struct btrfs_delayed_extent_op *extent_op;
2355         int ret;
2356
2357         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2358         if (!extent_op)
2359                 return -ENOMEM;
2360
2361         extent_op->flags_to_set = flags;
2362         extent_op->update_flags = 1;
2363         extent_op->update_key = 0;
2364         extent_op->is_data = is_data ? 1 : 0;
2365
2366         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2367         if (ret)
2368                 kfree(extent_op);
2369         return ret;
2370 }
2371
2372 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2373                                       struct btrfs_root *root,
2374                                       struct btrfs_path *path,
2375                                       u64 objectid, u64 offset, u64 bytenr)
2376 {
2377         struct btrfs_delayed_ref_head *head;
2378         struct btrfs_delayed_ref_node *ref;
2379         struct btrfs_delayed_data_ref *data_ref;
2380         struct btrfs_delayed_ref_root *delayed_refs;
2381         struct rb_node *node;
2382         int ret = 0;
2383
2384         ret = -ENOENT;
2385         delayed_refs = &trans->transaction->delayed_refs;
2386         spin_lock(&delayed_refs->lock);
2387         head = btrfs_find_delayed_ref_head(trans, bytenr);
2388         if (!head)
2389                 goto out;
2390
2391         if (!mutex_trylock(&head->mutex)) {
2392                 atomic_inc(&head->node.refs);
2393                 spin_unlock(&delayed_refs->lock);
2394
2395                 btrfs_release_path(path);
2396
2397                 /*
2398                  * Mutex was contended, block until it's released and let
2399                  * caller try again
2400                  */
2401                 mutex_lock(&head->mutex);
2402                 mutex_unlock(&head->mutex);
2403                 btrfs_put_delayed_ref(&head->node);
2404                 return -EAGAIN;
2405         }
2406
2407         node = rb_prev(&head->node.rb_node);
2408         if (!node)
2409                 goto out_unlock;
2410
2411         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2412
2413         if (ref->bytenr != bytenr)
2414                 goto out_unlock;
2415
2416         ret = 1;
2417         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2418                 goto out_unlock;
2419
2420         data_ref = btrfs_delayed_node_to_data_ref(ref);
2421
2422         node = rb_prev(node);
2423         if (node) {
2424                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2425                 if (ref->bytenr == bytenr)
2426                         goto out_unlock;
2427         }
2428
2429         if (data_ref->root != root->root_key.objectid ||
2430             data_ref->objectid != objectid || data_ref->offset != offset)
2431                 goto out_unlock;
2432
2433         ret = 0;
2434 out_unlock:
2435         mutex_unlock(&head->mutex);
2436 out:
2437         spin_unlock(&delayed_refs->lock);
2438         return ret;
2439 }
2440
2441 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2442                                         struct btrfs_root *root,
2443                                         struct btrfs_path *path,
2444                                         u64 objectid, u64 offset, u64 bytenr)
2445 {
2446         struct btrfs_root *extent_root = root->fs_info->extent_root;
2447         struct extent_buffer *leaf;
2448         struct btrfs_extent_data_ref *ref;
2449         struct btrfs_extent_inline_ref *iref;
2450         struct btrfs_extent_item *ei;
2451         struct btrfs_key key;
2452         u32 item_size;
2453         int ret;
2454
2455         key.objectid = bytenr;
2456         key.offset = (u64)-1;
2457         key.type = BTRFS_EXTENT_ITEM_KEY;
2458
2459         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2460         if (ret < 0)
2461                 goto out;
2462         BUG_ON(ret == 0);
2463
2464         ret = -ENOENT;
2465         if (path->slots[0] == 0)
2466                 goto out;
2467
2468         path->slots[0]--;
2469         leaf = path->nodes[0];
2470         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2471
2472         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2473                 goto out;
2474
2475         ret = 1;
2476         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2477 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2478         if (item_size < sizeof(*ei)) {
2479                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2480                 goto out;
2481         }
2482 #endif
2483         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2484
2485         if (item_size != sizeof(*ei) +
2486             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2487                 goto out;
2488
2489         if (btrfs_extent_generation(leaf, ei) <=
2490             btrfs_root_last_snapshot(&root->root_item))
2491                 goto out;
2492
2493         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2494         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2495             BTRFS_EXTENT_DATA_REF_KEY)
2496                 goto out;
2497
2498         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2499         if (btrfs_extent_refs(leaf, ei) !=
2500             btrfs_extent_data_ref_count(leaf, ref) ||
2501             btrfs_extent_data_ref_root(leaf, ref) !=
2502             root->root_key.objectid ||
2503             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2504             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2505                 goto out;
2506
2507         ret = 0;
2508 out:
2509         return ret;
2510 }
2511
2512 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2513                           struct btrfs_root *root,
2514                           u64 objectid, u64 offset, u64 bytenr)
2515 {
2516         struct btrfs_path *path;
2517         int ret;
2518         int ret2;
2519
2520         path = btrfs_alloc_path();
2521         if (!path)
2522                 return -ENOENT;
2523
2524         do {
2525                 ret = check_committed_ref(trans, root, path, objectid,
2526                                           offset, bytenr);
2527                 if (ret && ret != -ENOENT)
2528                         goto out;
2529
2530                 ret2 = check_delayed_ref(trans, root, path, objectid,
2531                                          offset, bytenr);
2532         } while (ret2 == -EAGAIN);
2533
2534         if (ret2 && ret2 != -ENOENT) {
2535                 ret = ret2;
2536                 goto out;
2537         }
2538
2539         if (ret != -ENOENT || ret2 != -ENOENT)
2540                 ret = 0;
2541 out:
2542         btrfs_free_path(path);
2543         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2544                 WARN_ON(ret > 0);
2545         return ret;
2546 }
2547
2548 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2549                            struct btrfs_root *root,
2550                            struct extent_buffer *buf,
2551                            int full_backref, int inc)
2552 {
2553         u64 bytenr;
2554         u64 num_bytes;
2555         u64 parent;
2556         u64 ref_root;
2557         u32 nritems;
2558         struct btrfs_key key;
2559         struct btrfs_file_extent_item *fi;
2560         int i;
2561         int level;
2562         int ret = 0;
2563         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2564                             u64, u64, u64, u64, u64, u64);
2565
2566         ref_root = btrfs_header_owner(buf);
2567         nritems = btrfs_header_nritems(buf);
2568         level = btrfs_header_level(buf);
2569
2570         if (!root->ref_cows && level == 0)
2571                 return 0;
2572
2573         if (inc)
2574                 process_func = btrfs_inc_extent_ref;
2575         else
2576                 process_func = btrfs_free_extent;
2577
2578         if (full_backref)
2579                 parent = buf->start;
2580         else
2581                 parent = 0;
2582
2583         for (i = 0; i < nritems; i++) {
2584                 if (level == 0) {
2585                         btrfs_item_key_to_cpu(buf, &key, i);
2586                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2587                                 continue;
2588                         fi = btrfs_item_ptr(buf, i,
2589                                             struct btrfs_file_extent_item);
2590                         if (btrfs_file_extent_type(buf, fi) ==
2591                             BTRFS_FILE_EXTENT_INLINE)
2592                                 continue;
2593                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2594                         if (bytenr == 0)
2595                                 continue;
2596
2597                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2598                         key.offset -= btrfs_file_extent_offset(buf, fi);
2599                         ret = process_func(trans, root, bytenr, num_bytes,
2600                                            parent, ref_root, key.objectid,
2601                                            key.offset);
2602                         if (ret)
2603                                 goto fail;
2604                 } else {
2605                         bytenr = btrfs_node_blockptr(buf, i);
2606                         num_bytes = btrfs_level_size(root, level - 1);
2607                         ret = process_func(trans, root, bytenr, num_bytes,
2608                                            parent, ref_root, level - 1, 0);
2609                         if (ret)
2610                                 goto fail;
2611                 }
2612         }
2613         return 0;
2614 fail:
2615         BUG();
2616         return ret;
2617 }
2618
2619 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2620                   struct extent_buffer *buf, int full_backref)
2621 {
2622         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2623 }
2624
2625 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2626                   struct extent_buffer *buf, int full_backref)
2627 {
2628         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2629 }
2630
2631 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2632                                  struct btrfs_root *root,
2633                                  struct btrfs_path *path,
2634                                  struct btrfs_block_group_cache *cache)
2635 {
2636         int ret;
2637         struct btrfs_root *extent_root = root->fs_info->extent_root;
2638         unsigned long bi;
2639         struct extent_buffer *leaf;
2640
2641         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2642         if (ret < 0)
2643                 goto fail;
2644         BUG_ON(ret);
2645
2646         leaf = path->nodes[0];
2647         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2648         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2649         btrfs_mark_buffer_dirty(leaf);
2650         btrfs_release_path(path);
2651 fail:
2652         if (ret)
2653                 return ret;
2654         return 0;
2655
2656 }
2657
2658 static struct btrfs_block_group_cache *
2659 next_block_group(struct btrfs_root *root,
2660                  struct btrfs_block_group_cache *cache)
2661 {
2662         struct rb_node *node;
2663         spin_lock(&root->fs_info->block_group_cache_lock);
2664         node = rb_next(&cache->cache_node);
2665         btrfs_put_block_group(cache);
2666         if (node) {
2667                 cache = rb_entry(node, struct btrfs_block_group_cache,
2668                                  cache_node);
2669                 btrfs_get_block_group(cache);
2670         } else
2671                 cache = NULL;
2672         spin_unlock(&root->fs_info->block_group_cache_lock);
2673         return cache;
2674 }
2675
2676 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2677                             struct btrfs_trans_handle *trans,
2678                             struct btrfs_path *path)
2679 {
2680         struct btrfs_root *root = block_group->fs_info->tree_root;
2681         struct inode *inode = NULL;
2682         u64 alloc_hint = 0;
2683         int dcs = BTRFS_DC_ERROR;
2684         int num_pages = 0;
2685         int retries = 0;
2686         int ret = 0;
2687
2688         /*
2689          * If this block group is smaller than 100 megs don't bother caching the
2690          * block group.
2691          */
2692         if (block_group->key.offset < (100 * 1024 * 1024)) {
2693                 spin_lock(&block_group->lock);
2694                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2695                 spin_unlock(&block_group->lock);
2696                 return 0;
2697         }
2698
2699 again:
2700         inode = lookup_free_space_inode(root, block_group, path);
2701         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2702                 ret = PTR_ERR(inode);
2703                 btrfs_release_path(path);
2704                 goto out;
2705         }
2706
2707         if (IS_ERR(inode)) {
2708                 BUG_ON(retries);
2709                 retries++;
2710
2711                 if (block_group->ro)
2712                         goto out_free;
2713
2714                 ret = create_free_space_inode(root, trans, block_group, path);
2715                 if (ret)
2716                         goto out_free;
2717                 goto again;
2718         }
2719
2720         /*
2721          * We want to set the generation to 0, that way if anything goes wrong
2722          * from here on out we know not to trust this cache when we load up next
2723          * time.
2724          */
2725         BTRFS_I(inode)->generation = 0;
2726         ret = btrfs_update_inode(trans, root, inode);
2727         WARN_ON(ret);
2728
2729         if (i_size_read(inode) > 0) {
2730                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2731                                                       inode);
2732                 if (ret)
2733                         goto out_put;
2734         }
2735
2736         spin_lock(&block_group->lock);
2737         if (block_group->cached != BTRFS_CACHE_FINISHED) {
2738                 /* We're not cached, don't bother trying to write stuff out */
2739                 dcs = BTRFS_DC_WRITTEN;
2740                 spin_unlock(&block_group->lock);
2741                 goto out_put;
2742         }
2743         spin_unlock(&block_group->lock);
2744
2745         num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2746         if (!num_pages)
2747                 num_pages = 1;
2748
2749         /*
2750          * Just to make absolutely sure we have enough space, we're going to
2751          * preallocate 12 pages worth of space for each block group.  In
2752          * practice we ought to use at most 8, but we need extra space so we can
2753          * add our header and have a terminator between the extents and the
2754          * bitmaps.
2755          */
2756         num_pages *= 16;
2757         num_pages *= PAGE_CACHE_SIZE;
2758
2759         ret = btrfs_delalloc_reserve_space(inode, num_pages);
2760         if (ret)
2761                 goto out_put;
2762
2763         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2764                                               num_pages, num_pages,
2765                                               &alloc_hint);
2766         if (!ret) {
2767                 dcs = BTRFS_DC_SETUP;
2768                 btrfs_free_reserved_data_space(inode, num_pages);
2769         } else {
2770                 btrfs_delalloc_release_space(inode, num_pages);
2771         }
2772
2773 out_put:
2774         iput(inode);
2775 out_free:
2776         btrfs_release_path(path);
2777 out:
2778         spin_lock(&block_group->lock);
2779         block_group->disk_cache_state = dcs;
2780         spin_unlock(&block_group->lock);
2781
2782         return ret;
2783 }
2784
2785 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2786                                    struct btrfs_root *root)
2787 {
2788         struct btrfs_block_group_cache *cache;
2789         int err = 0;
2790         struct btrfs_path *path;
2791         u64 last = 0;
2792
2793         path = btrfs_alloc_path();
2794         if (!path)
2795                 return -ENOMEM;
2796
2797 again:
2798         while (1) {
2799                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2800                 while (cache) {
2801                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2802                                 break;
2803                         cache = next_block_group(root, cache);
2804                 }
2805                 if (!cache) {
2806                         if (last == 0)
2807                                 break;
2808                         last = 0;
2809                         continue;
2810                 }
2811                 err = cache_save_setup(cache, trans, path);
2812                 last = cache->key.objectid + cache->key.offset;
2813                 btrfs_put_block_group(cache);
2814         }
2815
2816         while (1) {
2817                 if (last == 0) {
2818                         err = btrfs_run_delayed_refs(trans, root,
2819                                                      (unsigned long)-1);
2820                         BUG_ON(err);
2821                 }
2822
2823                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2824                 while (cache) {
2825                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2826                                 btrfs_put_block_group(cache);
2827                                 goto again;
2828                         }
2829
2830                         if (cache->dirty)
2831                                 break;
2832                         cache = next_block_group(root, cache);
2833                 }
2834                 if (!cache) {
2835                         if (last == 0)
2836                                 break;
2837                         last = 0;
2838                         continue;
2839                 }
2840
2841                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2842                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2843                 cache->dirty = 0;
2844                 last = cache->key.objectid + cache->key.offset;
2845
2846                 err = write_one_cache_group(trans, root, path, cache);
2847                 BUG_ON(err);
2848                 btrfs_put_block_group(cache);
2849         }
2850
2851         while (1) {
2852                 /*
2853                  * I don't think this is needed since we're just marking our
2854                  * preallocated extent as written, but just in case it can't
2855                  * hurt.
2856                  */
2857                 if (last == 0) {
2858                         err = btrfs_run_delayed_refs(trans, root,
2859                                                      (unsigned long)-1);
2860                         BUG_ON(err);
2861                 }
2862
2863                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2864                 while (cache) {
2865                         /*
2866                          * Really this shouldn't happen, but it could if we
2867                          * couldn't write the entire preallocated extent and
2868                          * splitting the extent resulted in a new block.
2869                          */
2870                         if (cache->dirty) {
2871                                 btrfs_put_block_group(cache);
2872                                 goto again;
2873                         }
2874                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2875                                 break;
2876                         cache = next_block_group(root, cache);
2877                 }
2878                 if (!cache) {
2879                         if (last == 0)
2880                                 break;
2881                         last = 0;
2882                         continue;
2883                 }
2884
2885                 btrfs_write_out_cache(root, trans, cache, path);
2886
2887                 /*
2888                  * If we didn't have an error then the cache state is still
2889                  * NEED_WRITE, so we can set it to WRITTEN.
2890                  */
2891                 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2892                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
2893                 last = cache->key.objectid + cache->key.offset;
2894                 btrfs_put_block_group(cache);
2895         }
2896
2897         btrfs_free_path(path);
2898         return 0;
2899 }
2900
2901 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2902 {
2903         struct btrfs_block_group_cache *block_group;
2904         int readonly = 0;
2905
2906         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2907         if (!block_group || block_group->ro)
2908                 readonly = 1;
2909         if (block_group)
2910                 btrfs_put_block_group(block_group);
2911         return readonly;
2912 }
2913
2914 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2915                              u64 total_bytes, u64 bytes_used,
2916                              struct btrfs_space_info **space_info)
2917 {
2918         struct btrfs_space_info *found;
2919         int i;
2920         int factor;
2921
2922         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2923                      BTRFS_BLOCK_GROUP_RAID10))
2924                 factor = 2;
2925         else
2926                 factor = 1;
2927
2928         found = __find_space_info(info, flags);
2929         if (found) {
2930                 spin_lock(&found->lock);
2931                 found->total_bytes += total_bytes;
2932                 found->disk_total += total_bytes * factor;
2933                 found->bytes_used += bytes_used;
2934                 found->disk_used += bytes_used * factor;
2935                 found->full = 0;
2936                 spin_unlock(&found->lock);
2937                 *space_info = found;
2938                 return 0;
2939         }
2940         found = kzalloc(sizeof(*found), GFP_NOFS);
2941         if (!found)
2942                 return -ENOMEM;
2943
2944         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2945                 INIT_LIST_HEAD(&found->block_groups[i]);
2946         init_rwsem(&found->groups_sem);
2947         spin_lock_init(&found->lock);
2948         found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2949                                 BTRFS_BLOCK_GROUP_SYSTEM |
2950                                 BTRFS_BLOCK_GROUP_METADATA);
2951         found->total_bytes = total_bytes;
2952         found->disk_total = total_bytes * factor;
2953         found->bytes_used = bytes_used;
2954         found->disk_used = bytes_used * factor;
2955         found->bytes_pinned = 0;
2956         found->bytes_reserved = 0;
2957         found->bytes_readonly = 0;
2958         found->bytes_may_use = 0;
2959         found->full = 0;
2960         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
2961         found->chunk_alloc = 0;
2962         found->flush = 0;
2963         init_waitqueue_head(&found->wait);
2964         *space_info = found;
2965         list_add_rcu(&found->list, &info->space_info);
2966         return 0;
2967 }
2968
2969 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2970 {
2971         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2972                                    BTRFS_BLOCK_GROUP_RAID1 |
2973                                    BTRFS_BLOCK_GROUP_RAID10 |
2974                                    BTRFS_BLOCK_GROUP_DUP);
2975         if (extra_flags) {
2976                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2977                         fs_info->avail_data_alloc_bits |= extra_flags;
2978                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2979                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2980                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2981                         fs_info->avail_system_alloc_bits |= extra_flags;
2982         }
2983 }
2984
2985 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2986 {
2987         /*
2988          * we add in the count of missing devices because we want
2989          * to make sure that any RAID levels on a degraded FS
2990          * continue to be honored.
2991          */
2992         u64 num_devices = root->fs_info->fs_devices->rw_devices +
2993                 root->fs_info->fs_devices->missing_devices;
2994
2995         if (num_devices == 1)
2996                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2997         if (num_devices < 4)
2998                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2999
3000         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3001             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3002                       BTRFS_BLOCK_GROUP_RAID10))) {
3003                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3004         }
3005
3006         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3007             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3008                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3009         }
3010
3011         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3012             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3013              (flags & BTRFS_BLOCK_GROUP_RAID10) |
3014              (flags & BTRFS_BLOCK_GROUP_DUP)))
3015                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3016         return flags;
3017 }
3018
3019 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3020 {
3021         if (flags & BTRFS_BLOCK_GROUP_DATA)
3022                 flags |= root->fs_info->avail_data_alloc_bits &
3023                          root->fs_info->data_alloc_profile;
3024         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3025                 flags |= root->fs_info->avail_system_alloc_bits &
3026                          root->fs_info->system_alloc_profile;
3027         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3028                 flags |= root->fs_info->avail_metadata_alloc_bits &
3029                          root->fs_info->metadata_alloc_profile;
3030         return btrfs_reduce_alloc_profile(root, flags);
3031 }
3032
3033 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3034 {
3035         u64 flags;
3036
3037         if (data)
3038                 flags = BTRFS_BLOCK_GROUP_DATA;
3039         else if (root == root->fs_info->chunk_root)
3040                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3041         else
3042                 flags = BTRFS_BLOCK_GROUP_METADATA;
3043
3044         return get_alloc_profile(root, flags);
3045 }
3046
3047 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3048 {
3049         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3050                                                        BTRFS_BLOCK_GROUP_DATA);
3051 }
3052
3053 /*
3054  * This will check the space that the inode allocates from to make sure we have
3055  * enough space for bytes.
3056  */
3057 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3058 {
3059         struct btrfs_space_info *data_sinfo;
3060         struct btrfs_root *root = BTRFS_I(inode)->root;
3061         u64 used;
3062         int ret = 0, committed = 0, alloc_chunk = 1;
3063
3064         /* make sure bytes are sectorsize aligned */
3065         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3066
3067         if (root == root->fs_info->tree_root ||
3068             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3069                 alloc_chunk = 0;
3070                 committed = 1;
3071         }
3072
3073         data_sinfo = BTRFS_I(inode)->space_info;
3074         if (!data_sinfo)
3075                 goto alloc;
3076
3077 again:
3078         /* make sure we have enough space to handle the data first */
3079         spin_lock(&data_sinfo->lock);
3080         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3081                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3082                 data_sinfo->bytes_may_use;
3083
3084         if (used + bytes > data_sinfo->total_bytes) {
3085                 struct btrfs_trans_handle *trans;
3086
3087                 /*
3088                  * if we don't have enough free bytes in this space then we need
3089                  * to alloc a new chunk.
3090                  */
3091                 if (!data_sinfo->full && alloc_chunk) {
3092                         u64 alloc_target;
3093
3094                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3095                         spin_unlock(&data_sinfo->lock);
3096 alloc:
3097                         alloc_target = btrfs_get_alloc_profile(root, 1);
3098                         trans = btrfs_join_transaction(root);
3099                         if (IS_ERR(trans))
3100                                 return PTR_ERR(trans);
3101
3102                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3103                                              bytes + 2 * 1024 * 1024,
3104                                              alloc_target,
3105                                              CHUNK_ALLOC_NO_FORCE);
3106                         btrfs_end_transaction(trans, root);
3107                         if (ret < 0) {
3108                                 if (ret != -ENOSPC)
3109                                         return ret;
3110                                 else
3111                                         goto commit_trans;
3112                         }
3113
3114                         if (!data_sinfo) {
3115                                 btrfs_set_inode_space_info(root, inode);
3116                                 data_sinfo = BTRFS_I(inode)->space_info;
3117                         }
3118                         goto again;
3119                 }
3120
3121                 /*
3122                  * If we have less pinned bytes than we want to allocate then
3123                  * don't bother committing the transaction, it won't help us.
3124                  */
3125                 if (data_sinfo->bytes_pinned < bytes)
3126                         committed = 1;
3127                 spin_unlock(&data_sinfo->lock);
3128
3129                 /* commit the current transaction and try again */
3130 commit_trans:
3131                 if (!committed &&
3132                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3133                         committed = 1;
3134                         trans = btrfs_join_transaction(root);
3135                         if (IS_ERR(trans))
3136                                 return PTR_ERR(trans);
3137                         ret = btrfs_commit_transaction(trans, root);
3138                         if (ret)
3139                                 return ret;
3140                         goto again;
3141                 }
3142
3143                 return -ENOSPC;
3144         }
3145         data_sinfo->bytes_may_use += bytes;
3146         spin_unlock(&data_sinfo->lock);
3147
3148         return 0;
3149 }
3150
3151 /*
3152  * Called if we need to clear a data reservation for this inode.
3153  */
3154 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3155 {
3156         struct btrfs_root *root = BTRFS_I(inode)->root;
3157         struct btrfs_space_info *data_sinfo;
3158
3159         /* make sure bytes are sectorsize aligned */
3160         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3161
3162         data_sinfo = BTRFS_I(inode)->space_info;
3163         spin_lock(&data_sinfo->lock);
3164         data_sinfo->bytes_may_use -= bytes;
3165         spin_unlock(&data_sinfo->lock);
3166 }
3167
3168 static void force_metadata_allocation(struct btrfs_fs_info *info)
3169 {
3170         struct list_head *head = &info->space_info;
3171         struct btrfs_space_info *found;
3172
3173         rcu_read_lock();
3174         list_for_each_entry_rcu(found, head, list) {
3175                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3176                         found->force_alloc = CHUNK_ALLOC_FORCE;
3177         }
3178         rcu_read_unlock();
3179 }
3180
3181 static int should_alloc_chunk(struct btrfs_root *root,
3182                               struct btrfs_space_info *sinfo, u64 alloc_bytes,
3183                               int force)
3184 {
3185         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3186         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3187         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3188         u64 thresh;
3189
3190         if (force == CHUNK_ALLOC_FORCE)
3191                 return 1;
3192
3193         /*
3194          * We need to take into account the global rsv because for all intents
3195          * and purposes it's used space.  Don't worry about locking the
3196          * global_rsv, it doesn't change except when the transaction commits.
3197          */
3198         num_allocated += global_rsv->size;
3199
3200         /*
3201          * in limited mode, we want to have some free space up to
3202          * about 1% of the FS size.
3203          */
3204         if (force == CHUNK_ALLOC_LIMITED) {
3205                 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3206                 thresh = max_t(u64, 64 * 1024 * 1024,
3207                                div_factor_fine(thresh, 1));
3208
3209                 if (num_bytes - num_allocated < thresh)
3210                         return 1;
3211         }
3212
3213         /*
3214          * we have two similar checks here, one based on percentage
3215          * and once based on a hard number of 256MB.  The idea
3216          * is that if we have a good amount of free
3217          * room, don't allocate a chunk.  A good mount is
3218          * less than 80% utilized of the chunks we have allocated,
3219          * or more than 256MB free
3220          */
3221         if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3222                 return 0;
3223
3224         if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
3225                 return 0;
3226
3227         thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3228
3229         /* 256MB or 5% of the FS */
3230         thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
3231
3232         if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
3233                 return 0;
3234         return 1;
3235 }
3236
3237 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3238                           struct btrfs_root *extent_root, u64 alloc_bytes,
3239                           u64 flags, int force)
3240 {
3241         struct btrfs_space_info *space_info;
3242         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3243         int wait_for_alloc = 0;
3244         int ret = 0;
3245
3246         flags = btrfs_reduce_alloc_profile(extent_root, flags);
3247
3248         space_info = __find_space_info(extent_root->fs_info, flags);
3249         if (!space_info) {
3250                 ret = update_space_info(extent_root->fs_info, flags,
3251                                         0, 0, &space_info);
3252                 BUG_ON(ret);
3253         }
3254         BUG_ON(!space_info);
3255
3256 again:
3257         spin_lock(&space_info->lock);
3258         if (space_info->force_alloc)
3259                 force = space_info->force_alloc;
3260         if (space_info->full) {
3261                 spin_unlock(&space_info->lock);
3262                 return 0;
3263         }
3264
3265         if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3266                 spin_unlock(&space_info->lock);
3267                 return 0;
3268         } else if (space_info->chunk_alloc) {
3269                 wait_for_alloc = 1;
3270         } else {
3271                 space_info->chunk_alloc = 1;
3272         }
3273
3274         spin_unlock(&space_info->lock);
3275
3276         mutex_lock(&fs_info->chunk_mutex);
3277
3278         /*
3279          * The chunk_mutex is held throughout the entirety of a chunk
3280          * allocation, so once we've acquired the chunk_mutex we know that the
3281          * other guy is done and we need to recheck and see if we should
3282          * allocate.
3283          */
3284         if (wait_for_alloc) {
3285                 mutex_unlock(&fs_info->chunk_mutex);
3286                 wait_for_alloc = 0;
3287                 goto again;
3288         }
3289
3290         /*
3291          * If we have mixed data/metadata chunks we want to make sure we keep
3292          * allocating mixed chunks instead of individual chunks.
3293          */
3294         if (btrfs_mixed_space_info(space_info))
3295                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3296
3297         /*
3298          * if we're doing a data chunk, go ahead and make sure that
3299          * we keep a reasonable number of metadata chunks allocated in the
3300          * FS as well.
3301          */
3302         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3303                 fs_info->data_chunk_allocations++;
3304                 if (!(fs_info->data_chunk_allocations %
3305                       fs_info->metadata_ratio))
3306                         force_metadata_allocation(fs_info);
3307         }
3308
3309         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3310         if (ret < 0 && ret != -ENOSPC)
3311                 goto out;
3312
3313         spin_lock(&space_info->lock);
3314         if (ret)
3315                 space_info->full = 1;
3316         else
3317                 ret = 1;
3318
3319         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3320         space_info->chunk_alloc = 0;
3321         spin_unlock(&space_info->lock);
3322 out:
3323         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3324         return ret;
3325 }
3326
3327 /*
3328  * shrink metadata reservation for delalloc
3329  */
3330 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3331                            struct btrfs_root *root, u64 to_reclaim, int sync)
3332 {
3333         struct btrfs_block_rsv *block_rsv;
3334         struct btrfs_space_info *space_info;
3335         u64 reserved;
3336         u64 max_reclaim;
3337         u64 reclaimed = 0;
3338         long time_left;
3339         int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3340         int loops = 0;
3341         unsigned long progress;
3342
3343         block_rsv = &root->fs_info->delalloc_block_rsv;
3344         space_info = block_rsv->space_info;
3345
3346         smp_mb();
3347         reserved = space_info->bytes_may_use;
3348         progress = space_info->reservation_progress;
3349
3350         if (reserved == 0)
3351                 return 0;
3352
3353         smp_mb();
3354         if (root->fs_info->delalloc_bytes == 0) {
3355                 if (trans)
3356                         return 0;
3357                 btrfs_wait_ordered_extents(root, 0, 0);
3358                 return 0;
3359         }
3360
3361         max_reclaim = min(reserved, to_reclaim);
3362
3363         while (loops < 1024) {
3364                 /* have the flusher threads jump in and do some IO */
3365                 smp_mb();
3366                 nr_pages = min_t(unsigned long, nr_pages,
3367                        root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3368                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
3369
3370                 spin_lock(&space_info->lock);
3371                 if (reserved > space_info->bytes_may_use)
3372                         reclaimed += reserved - space_info->bytes_may_use;
3373                 reserved = space_info->bytes_may_use;
3374                 spin_unlock(&space_info->lock);
3375
3376                 loops++;
3377
3378                 if (reserved == 0 || reclaimed >= max_reclaim)
3379                         break;
3380
3381                 if (trans && trans->transaction->blocked)
3382                         return -EAGAIN;
3383
3384                 time_left = schedule_timeout_interruptible(1);
3385
3386                 /* We were interrupted, exit */
3387                 if (time_left)
3388                         break;
3389
3390                 /* we've kicked the IO a few times, if anything has been freed,
3391                  * exit.  There is no sense in looping here for a long time
3392                  * when we really need to commit the transaction, or there are
3393                  * just too many writers without enough free space
3394                  */
3395
3396                 if (loops > 3) {
3397                         smp_mb();
3398                         if (progress != space_info->reservation_progress)
3399                                 break;
3400                 }
3401
3402         }
3403         if (reclaimed >= to_reclaim && !trans)
3404                 btrfs_wait_ordered_extents(root, 0, 0);
3405         return reclaimed >= to_reclaim;
3406 }
3407
3408 /**
3409  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3410  * @root - the root we're allocating for
3411  * @block_rsv - the block_rsv we're allocating for
3412  * @orig_bytes - the number of bytes we want
3413  * @flush - wether or not we can flush to make our reservation
3414  * @check - wether this is just to check if we have enough space or not
3415  *
3416  * This will reserve orgi_bytes number of bytes from the space info associated
3417  * with the block_rsv.  If there is not enough space it will make an attempt to
3418  * flush out space to make room.  It will do this by flushing delalloc if
3419  * possible or committing the transaction.  If flush is 0 then no attempts to
3420  * regain reservations will be made and this will fail if there is not enough
3421  * space already.
3422  */
3423 static int reserve_metadata_bytes(struct btrfs_root *root,
3424                                   struct btrfs_block_rsv *block_rsv,
3425                                   u64 orig_bytes, int flush, int check)
3426 {
3427         struct btrfs_space_info *space_info = block_rsv->space_info;
3428         struct btrfs_trans_handle *trans;
3429         u64 used;
3430         u64 num_bytes = orig_bytes;
3431         int retries = 0;
3432         int ret = 0;
3433         bool committed = false;
3434         bool flushing = false;
3435
3436         trans = (struct btrfs_trans_handle *)current->journal_info;
3437 again:
3438         ret = 0;
3439         spin_lock(&space_info->lock);
3440         /*
3441          * We only want to wait if somebody other than us is flushing and we are
3442          * actually alloed to flush.
3443          */
3444         while (flush && !flushing && space_info->flush) {
3445                 spin_unlock(&space_info->lock);
3446                 /*
3447                  * If we have a trans handle we can't wait because the flusher
3448                  * may have to commit the transaction, which would mean we would
3449                  * deadlock since we are waiting for the flusher to finish, but
3450                  * hold the current transaction open.
3451                  */
3452                 if (trans)
3453                         return -EAGAIN;
3454                 ret = wait_event_interruptible(space_info->wait,
3455                                                !space_info->flush);
3456                 /* Must have been interrupted, return */
3457                 if (ret)
3458                         return -EINTR;
3459
3460                 spin_lock(&space_info->lock);
3461         }
3462
3463         ret = -ENOSPC;
3464         used = space_info->bytes_used + space_info->bytes_reserved +
3465                 space_info->bytes_pinned + space_info->bytes_readonly +
3466                 space_info->bytes_may_use;
3467
3468         /*
3469          * The idea here is that we've not already over-reserved the block group
3470          * then we can go ahead and save our reservation first and then start
3471          * flushing if we need to.  Otherwise if we've already overcommitted
3472          * lets start flushing stuff first and then come back and try to make
3473          * our reservation.
3474          */
3475         if (used <= space_info->total_bytes) {
3476                 if (used + orig_bytes <= space_info->total_bytes) {
3477                         space_info->bytes_may_use += orig_bytes;
3478                         ret = 0;
3479                 } else {
3480                         /*
3481                          * Ok set num_bytes to orig_bytes since we aren't
3482                          * overocmmitted, this way we only try and reclaim what
3483                          * we need.
3484                          */
3485                         num_bytes = orig_bytes;
3486                 }
3487         } else {
3488                 /*
3489                  * Ok we're over committed, set num_bytes to the overcommitted
3490                  * amount plus the amount of bytes that we need for this
3491                  * reservation.
3492                  */
3493                 num_bytes = used - space_info->total_bytes +
3494                         (orig_bytes * (retries + 1));
3495         }
3496
3497         if (ret && !check) {
3498                 u64 profile = btrfs_get_alloc_profile(root, 0);
3499                 u64 avail;
3500
3501                 spin_lock(&root->fs_info->free_chunk_lock);
3502                 avail = root->fs_info->free_chunk_space;
3503
3504                 /*
3505                  * If we have dup, raid1 or raid10 then only half of the free
3506                  * space is actually useable.
3507                  */
3508                 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3509                                BTRFS_BLOCK_GROUP_RAID1 |
3510                                BTRFS_BLOCK_GROUP_RAID10))
3511                         avail >>= 1;
3512
3513                 /*
3514                  * If we aren't flushing don't let us overcommit too much, say
3515                  * 1/8th of the space.  If we can flush, let it overcommit up to
3516                  * 1/2 of the space.
3517                  */
3518                 if (flush)
3519                         avail >>= 3;
3520                 else
3521                         avail >>= 1;
3522                  spin_unlock(&root->fs_info->free_chunk_lock);
3523
3524                 if (used + num_bytes < space_info->total_bytes + avail) {
3525                         space_info->bytes_may_use += orig_bytes;
3526                         ret = 0;
3527                 }
3528         }
3529
3530         /*
3531          * Couldn't make our reservation, save our place so while we're trying
3532          * to reclaim space we can actually use it instead of somebody else
3533          * stealing it from us.
3534          */
3535         if (ret && flush) {
3536                 flushing = true;
3537                 space_info->flush = 1;
3538         }
3539
3540         spin_unlock(&space_info->lock);
3541
3542         if (!ret || !flush)
3543                 goto out;
3544
3545         /*
3546          * We do synchronous shrinking since we don't actually unreserve
3547          * metadata until after the IO is completed.
3548          */
3549         ret = shrink_delalloc(trans, root, num_bytes, 1);
3550         if (ret < 0)
3551                 goto out;
3552
3553         ret = 0;
3554
3555         /*
3556          * So if we were overcommitted it's possible that somebody else flushed
3557          * out enough space and we simply didn't have enough space to reclaim,
3558          * so go back around and try again.
3559          */
3560         if (retries < 2) {
3561                 retries++;
3562                 goto again;
3563         }
3564
3565         /*
3566          * Not enough space to be reclaimed, don't bother committing the
3567          * transaction.
3568          */
3569         spin_lock(&space_info->lock);
3570         if (space_info->bytes_pinned < orig_bytes)
3571                 ret = -ENOSPC;
3572         spin_unlock(&space_info->lock);
3573         if (ret)
3574                 goto out;
3575
3576         ret = -EAGAIN;
3577         if (trans)
3578                 goto out;
3579
3580         ret = -ENOSPC;
3581         if (committed)
3582                 goto out;
3583
3584         trans = btrfs_join_transaction(root);
3585         if (IS_ERR(trans))
3586                 goto out;
3587         ret = btrfs_commit_transaction(trans, root);
3588         if (!ret) {
3589                 trans = NULL;
3590                 committed = true;
3591                 goto again;
3592         }
3593
3594 out:
3595         if (flushing) {
3596                 spin_lock(&space_info->lock);
3597                 space_info->flush = 0;
3598                 wake_up_all(&space_info->wait);
3599                 spin_unlock(&space_info->lock);
3600         }
3601         return ret;
3602 }
3603
3604 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3605                                              struct btrfs_root *root)
3606 {
3607         struct btrfs_block_rsv *block_rsv = NULL;
3608
3609         if (root->ref_cows || root == root->fs_info->csum_root)
3610                 block_rsv = trans->block_rsv;
3611
3612         if (!block_rsv)
3613                 block_rsv = root->block_rsv;
3614
3615         if (!block_rsv)
3616                 block_rsv = &root->fs_info->empty_block_rsv;
3617
3618         return block_rsv;
3619 }
3620
3621 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3622                                u64 num_bytes)
3623 {
3624         int ret = -ENOSPC;
3625         spin_lock(&block_rsv->lock);
3626         if (block_rsv->reserved >= num_bytes) {
3627                 block_rsv->reserved -= num_bytes;
3628                 if (block_rsv->reserved < block_rsv->size)
3629                         block_rsv->full = 0;
3630                 ret = 0;
3631         }
3632         spin_unlock(&block_rsv->lock);
3633         return ret;
3634 }
3635
3636 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3637                                 u64 num_bytes, int update_size)
3638 {
3639         spin_lock(&block_rsv->lock);
3640         block_rsv->reserved += num_bytes;
3641         if (update_size)
3642                 block_rsv->size += num_bytes;
3643         else if (block_rsv->reserved >= block_rsv->size)
3644                 block_rsv->full = 1;
3645         spin_unlock(&block_rsv->lock);
3646 }
3647
3648 static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3649                                     struct btrfs_block_rsv *dest, u64 num_bytes)
3650 {
3651         struct btrfs_space_info *space_info = block_rsv->space_info;
3652
3653         spin_lock(&block_rsv->lock);
3654         if (num_bytes == (u64)-1)
3655                 num_bytes = block_rsv->size;
3656         block_rsv->size -= num_bytes;
3657         if (block_rsv->reserved >= block_rsv->size) {
3658                 num_bytes = block_rsv->reserved - block_rsv->size;
3659                 block_rsv->reserved = block_rsv->size;
3660                 block_rsv->full = 1;
3661         } else {
3662                 num_bytes = 0;
3663         }
3664         spin_unlock(&block_rsv->lock);
3665
3666         if (num_bytes > 0) {
3667                 if (dest) {
3668                         spin_lock(&dest->lock);
3669                         if (!dest->full) {
3670                                 u64 bytes_to_add;
3671
3672                                 bytes_to_add = dest->size - dest->reserved;
3673                                 bytes_to_add = min(num_bytes, bytes_to_add);
3674                                 dest->reserved += bytes_to_add;
3675                                 if (dest->reserved >= dest->size)
3676                                         dest->full = 1;
3677                                 num_bytes -= bytes_to_add;
3678                         }
3679                         spin_unlock(&dest->lock);
3680                 }
3681                 if (num_bytes) {
3682                         spin_lock(&space_info->lock);
3683                         space_info->bytes_may_use -= num_bytes;
3684                         space_info->reservation_progress++;
3685                         spin_unlock(&space_info->lock);
3686                 }
3687         }
3688 }
3689
3690 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3691                                    struct btrfs_block_rsv *dst, u64 num_bytes)
3692 {
3693         int ret;
3694
3695         ret = block_rsv_use_bytes(src, num_bytes);
3696         if (ret)
3697                 return ret;
3698
3699         block_rsv_add_bytes(dst, num_bytes, 1);
3700         return 0;
3701 }
3702
3703 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3704 {
3705         memset(rsv, 0, sizeof(*rsv));
3706         spin_lock_init(&rsv->lock);
3707 }
3708
3709 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3710 {
3711         struct btrfs_block_rsv *block_rsv;
3712         struct btrfs_fs_info *fs_info = root->fs_info;
3713
3714         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3715         if (!block_rsv)
3716                 return NULL;
3717
3718         btrfs_init_block_rsv(block_rsv);
3719         block_rsv->space_info = __find_space_info(fs_info,
3720                                                   BTRFS_BLOCK_GROUP_METADATA);
3721         return block_rsv;
3722 }
3723
3724 void btrfs_free_block_rsv(struct btrfs_root *root,
3725                           struct btrfs_block_rsv *rsv)
3726 {
3727         btrfs_block_rsv_release(root, rsv, (u64)-1);
3728         kfree(rsv);
3729 }
3730
3731 int btrfs_block_rsv_add(struct btrfs_root *root,
3732                         struct btrfs_block_rsv *block_rsv,
3733                         u64 num_bytes)
3734 {
3735         int ret;
3736
3737         if (num_bytes == 0)
3738                 return 0;
3739
3740         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1, 0);
3741         if (!ret) {
3742                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3743                 return 0;
3744         }
3745
3746         return ret;
3747 }
3748
3749 int btrfs_block_rsv_check(struct btrfs_root *root,
3750                           struct btrfs_block_rsv *block_rsv,
3751                           u64 min_reserved, int min_factor, int flush)
3752 {
3753         u64 num_bytes = 0;
3754         int ret = -ENOSPC;
3755
3756         if (!block_rsv)
3757                 return 0;
3758
3759         spin_lock(&block_rsv->lock);
3760         if (min_factor > 0)
3761                 num_bytes = div_factor(block_rsv->size, min_factor);
3762         if (min_reserved > num_bytes)
3763                 num_bytes = min_reserved;
3764
3765         if (block_rsv->reserved >= num_bytes)
3766                 ret = 0;
3767         else
3768                 num_bytes -= block_rsv->reserved;
3769         spin_unlock(&block_rsv->lock);
3770
3771         if (!ret)
3772                 return 0;
3773
3774         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush, !flush);
3775         if (!ret) {
3776                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3777                 return 0;
3778         }
3779
3780         return ret;
3781 }
3782
3783 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3784                             struct btrfs_block_rsv *dst_rsv,
3785                             u64 num_bytes)
3786 {
3787         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3788 }
3789
3790 void btrfs_block_rsv_release(struct btrfs_root *root,
3791                              struct btrfs_block_rsv *block_rsv,
3792                              u64 num_bytes)
3793 {
3794         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3795         if (global_rsv->full || global_rsv == block_rsv ||
3796             block_rsv->space_info != global_rsv->space_info)
3797                 global_rsv = NULL;
3798         block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3799 }
3800
3801 /*
3802  * helper to calculate size of global block reservation.
3803  * the desired value is sum of space used by extent tree,
3804  * checksum tree and root tree
3805  */
3806 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3807 {
3808         struct btrfs_space_info *sinfo;
3809         u64 num_bytes;
3810         u64 meta_used;
3811         u64 data_used;
3812         int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3813
3814         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3815         spin_lock(&sinfo->lock);
3816         data_used = sinfo->bytes_used;
3817         spin_unlock(&sinfo->lock);
3818
3819         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3820         spin_lock(&sinfo->lock);
3821         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
3822                 data_used = 0;
3823         meta_used = sinfo->bytes_used;
3824         spin_unlock(&sinfo->lock);
3825
3826         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3827                     csum_size * 2;
3828         num_bytes += div64_u64(data_used + meta_used, 50);
3829
3830         if (num_bytes * 3 > meta_used)
3831                 num_bytes = div64_u64(meta_used, 3);
3832
3833         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3834 }
3835
3836 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3837 {
3838         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3839         struct btrfs_space_info *sinfo = block_rsv->space_info;
3840         u64 num_bytes;
3841
3842         num_bytes = calc_global_metadata_size(fs_info);
3843
3844         spin_lock(&block_rsv->lock);
3845         spin_lock(&sinfo->lock);
3846
3847         block_rsv->size = num_bytes;
3848
3849         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3850                     sinfo->bytes_reserved + sinfo->bytes_readonly +
3851                     sinfo->bytes_may_use;
3852
3853         if (sinfo->total_bytes > num_bytes) {
3854                 num_bytes = sinfo->total_bytes - num_bytes;
3855                 block_rsv->reserved += num_bytes;
3856                 sinfo->bytes_may_use += num_bytes;
3857         }
3858
3859         if (block_rsv->reserved >= block_rsv->size) {
3860                 num_bytes = block_rsv->reserved - block_rsv->size;
3861                 sinfo->bytes_may_use -= num_bytes;
3862                 sinfo->reservation_progress++;
3863                 block_rsv->reserved = block_rsv->size;
3864                 block_rsv->full = 1;
3865         }
3866
3867         spin_unlock(&sinfo->lock);
3868         spin_unlock(&block_rsv->lock);
3869 }
3870
3871 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3872 {
3873         struct btrfs_space_info *space_info;
3874
3875         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3876         fs_info->chunk_block_rsv.space_info = space_info;
3877
3878         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3879         fs_info->global_block_rsv.space_info = space_info;
3880         fs_info->delalloc_block_rsv.space_info = space_info;
3881         fs_info->trans_block_rsv.space_info = space_info;
3882         fs_info->empty_block_rsv.space_info = space_info;
3883
3884         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3885         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3886         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3887         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3888         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3889
3890         update_global_block_rsv(fs_info);
3891 }
3892
3893 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3894 {
3895         block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3896         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3897         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3898         WARN_ON(fs_info->trans_block_rsv.size > 0);
3899         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3900         WARN_ON(fs_info->chunk_block_rsv.size > 0);
3901         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3902 }
3903
3904 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3905                                   struct btrfs_root *root)
3906 {
3907         struct btrfs_block_rsv *block_rsv;
3908
3909         if (!trans->bytes_reserved)
3910                 return;
3911
3912         block_rsv = &root->fs_info->trans_block_rsv;
3913         btrfs_block_rsv_release(root, block_rsv, trans->bytes_reserved);
3914         trans->bytes_reserved = 0;
3915 }
3916
3917 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3918                                   struct inode *inode)
3919 {
3920         struct btrfs_root *root = BTRFS_I(inode)->root;
3921         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3922         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3923
3924         /*
3925          * We need to hold space in order to delete our orphan item once we've
3926          * added it, so this takes the reservation so we can release it later
3927          * when we are truly done with the orphan item.
3928          */
3929         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3930         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3931 }
3932
3933 void btrfs_orphan_release_metadata(struct inode *inode)
3934 {
3935         struct btrfs_root *root = BTRFS_I(inode)->root;
3936         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3937         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3938 }
3939
3940 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3941                                 struct btrfs_pending_snapshot *pending)
3942 {
3943         struct btrfs_root *root = pending->root;
3944         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3945         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3946         /*
3947          * two for root back/forward refs, two for directory entries
3948          * and one for root of the snapshot.
3949          */
3950         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
3951         dst_rsv->space_info = src_rsv->space_info;
3952         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3953 }
3954
3955 /**
3956  * drop_outstanding_extent - drop an outstanding extent
3957  * @inode: the inode we're dropping the extent for
3958  *
3959  * This is called when we are freeing up an outstanding extent, either called
3960  * after an error or after an extent is written.  This will return the number of
3961  * reserved extents that need to be freed.  This must be called with
3962  * BTRFS_I(inode)->lock held.
3963  */
3964 static unsigned drop_outstanding_extent(struct inode *inode)
3965 {
3966         unsigned dropped_extents = 0;
3967
3968         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
3969         BTRFS_I(inode)->outstanding_extents--;
3970
3971         /*
3972          * If we have more or the same amount of outsanding extents than we have
3973          * reserved then we need to leave the reserved extents count alone.
3974          */
3975         if (BTRFS_I(inode)->outstanding_extents >=
3976             BTRFS_I(inode)->reserved_extents)
3977                 return 0;
3978
3979         dropped_extents = BTRFS_I(inode)->reserved_extents -
3980                 BTRFS_I(inode)->outstanding_extents;
3981         BTRFS_I(inode)->reserved_extents -= dropped_extents;
3982         return dropped_extents;
3983 }
3984
3985 /**
3986  * calc_csum_metadata_size - return the amount of metada space that must be
3987  *      reserved/free'd for the given bytes.
3988  * @inode: the inode we're manipulating
3989  * @num_bytes: the number of bytes in question
3990  * @reserve: 1 if we are reserving space, 0 if we are freeing space
3991  *
3992  * This adjusts the number of csum_bytes in the inode and then returns the
3993  * correct amount of metadata that must either be reserved or freed.  We
3994  * calculate how many checksums we can fit into one leaf and then divide the
3995  * number of bytes that will need to be checksumed by this value to figure out
3996  * how many checksums will be required.  If we are adding bytes then the number
3997  * may go up and we will return the number of additional bytes that must be
3998  * reserved.  If it is going down we will return the number of bytes that must
3999  * be freed.
4000  *
4001  * This must be called with BTRFS_I(inode)->lock held.
4002  */
4003 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4004                                    int reserve)
4005 {
4006         struct btrfs_root *root = BTRFS_I(inode)->root;
4007         u64 csum_size;
4008         int num_csums_per_leaf;
4009         int num_csums;
4010         int old_csums;
4011
4012         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4013             BTRFS_I(inode)->csum_bytes == 0)
4014                 return 0;
4015
4016         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4017         if (reserve)
4018                 BTRFS_I(inode)->csum_bytes += num_bytes;
4019         else
4020                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4021         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4022         num_csums_per_leaf = (int)div64_u64(csum_size,
4023                                             sizeof(struct btrfs_csum_item) +
4024                                             sizeof(struct btrfs_disk_key));
4025         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4026         num_csums = num_csums + num_csums_per_leaf - 1;
4027         num_csums = num_csums / num_csums_per_leaf;
4028
4029         old_csums = old_csums + num_csums_per_leaf - 1;
4030         old_csums = old_csums / num_csums_per_leaf;
4031
4032         /* No change, no need to reserve more */
4033         if (old_csums == num_csums)
4034                 return 0;
4035
4036         if (reserve)
4037                 return btrfs_calc_trans_metadata_size(root,
4038                                                       num_csums - old_csums);
4039
4040         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4041 }
4042
4043 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4044 {
4045         struct btrfs_root *root = BTRFS_I(inode)->root;
4046         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4047         u64 to_reserve = 0;
4048         unsigned nr_extents = 0;
4049         int flush = 1;
4050         int ret;
4051
4052         if (btrfs_is_free_space_inode(root, inode))
4053                 flush = 0;
4054
4055         if (flush && btrfs_transaction_in_commit(root->fs_info))
4056                 schedule_timeout(1);
4057
4058         num_bytes = ALIGN(num_bytes, root->sectorsize);
4059
4060         spin_lock(&BTRFS_I(inode)->lock);
4061         BTRFS_I(inode)->outstanding_extents++;
4062
4063         if (BTRFS_I(inode)->outstanding_extents >
4064             BTRFS_I(inode)->reserved_extents) {
4065                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4066                         BTRFS_I(inode)->reserved_extents;
4067                 BTRFS_I(inode)->reserved_extents += nr_extents;
4068
4069                 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4070         }
4071         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4072         spin_unlock(&BTRFS_I(inode)->lock);
4073
4074         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush, 0);
4075         if (ret) {
4076                 u64 to_free = 0;
4077                 unsigned dropped;
4078
4079                 spin_lock(&BTRFS_I(inode)->lock);
4080                 dropped = drop_outstanding_extent(inode);
4081                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4082                 spin_unlock(&BTRFS_I(inode)->lock);
4083                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4084
4085                 /*
4086                  * Somebody could have come in and twiddled with the
4087                  * reservation, so if we have to free more than we would have
4088                  * reserved from this reservation go ahead and release those
4089                  * bytes.
4090                  */
4091                 to_free -= to_reserve;
4092                 if (to_free)
4093                         btrfs_block_rsv_release(root, block_rsv, to_free);
4094                 return ret;
4095         }
4096
4097         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4098
4099         return 0;
4100 }
4101
4102 /**
4103  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4104  * @inode: the inode to release the reservation for
4105  * @num_bytes: the number of bytes we're releasing
4106  *
4107  * This will release the metadata reservation for an inode.  This can be called
4108  * once we complete IO for a given set of bytes to release their metadata
4109  * reservations.
4110  */
4111 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4112 {
4113         struct btrfs_root *root = BTRFS_I(inode)->root;
4114         u64 to_free = 0;
4115         unsigned dropped;
4116
4117         num_bytes = ALIGN(num_bytes, root->sectorsize);
4118         spin_lock(&BTRFS_I(inode)->lock);
4119         dropped = drop_outstanding_extent(inode);
4120
4121         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4122         spin_unlock(&BTRFS_I(inode)->lock);
4123         if (dropped > 0)
4124                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4125
4126         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4127                                 to_free);
4128 }
4129
4130 /**
4131  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4132  * @inode: inode we're writing to
4133  * @num_bytes: the number of bytes we want to allocate
4134  *
4135  * This will do the following things
4136  *
4137  * o reserve space in the data space info for num_bytes
4138  * o reserve space in the metadata space info based on number of outstanding
4139  *   extents and how much csums will be needed
4140  * o add to the inodes ->delalloc_bytes
4141  * o add it to the fs_info's delalloc inodes list.
4142  *
4143  * This will return 0 for success and -ENOSPC if there is no space left.
4144  */
4145 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4146 {
4147         int ret;
4148
4149         ret = btrfs_check_data_free_space(inode, num_bytes);
4150         if (ret)
4151                 return ret;
4152
4153         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4154         if (ret) {
4155                 btrfs_free_reserved_data_space(inode, num_bytes);
4156                 return ret;
4157         }
4158
4159         return 0;
4160 }
4161
4162 /**
4163  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4164  * @inode: inode we're releasing space for
4165  * @num_bytes: the number of bytes we want to free up
4166  *
4167  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4168  * called in the case that we don't need the metadata AND data reservations
4169  * anymore.  So if there is an error or we insert an inline extent.
4170  *
4171  * This function will release the metadata space that was not used and will
4172  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4173  * list if there are no delalloc bytes left.
4174  */
4175 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4176 {
4177         btrfs_delalloc_release_metadata(inode, num_bytes);
4178         btrfs_free_reserved_data_space(inode, num_bytes);
4179 }
4180
4181 static int update_block_group(struct btrfs_trans_handle *trans,
4182                               struct btrfs_root *root,
4183                               u64 bytenr, u64 num_bytes, int alloc)
4184 {
4185         struct btrfs_block_group_cache *cache = NULL;
4186         struct btrfs_fs_info *info = root->fs_info;
4187         u64 total = num_bytes;
4188         u64 old_val;
4189         u64 byte_in_group;
4190         int factor;
4191
4192         /* block accounting for super block */
4193         spin_lock(&info->delalloc_lock);
4194         old_val = btrfs_super_bytes_used(&info->super_copy);
4195         if (alloc)
4196                 old_val += num_bytes;
4197         else
4198                 old_val -= num_bytes;
4199         btrfs_set_super_bytes_used(&info->super_copy, old_val);
4200         spin_unlock(&info->delalloc_lock);
4201
4202         while (total) {
4203                 cache = btrfs_lookup_block_group(info, bytenr);
4204                 if (!cache)
4205                         return -1;
4206                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4207                                     BTRFS_BLOCK_GROUP_RAID1 |
4208                                     BTRFS_BLOCK_GROUP_RAID10))
4209                         factor = 2;
4210                 else
4211                         factor = 1;
4212                 /*
4213                  * If this block group has free space cache written out, we
4214                  * need to make sure to load it if we are removing space.  This
4215                  * is because we need the unpinning stage to actually add the
4216                  * space back to the block group, otherwise we will leak space.
4217                  */
4218                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4219                         cache_block_group(cache, trans, NULL, 1);
4220
4221                 byte_in_group = bytenr - cache->key.objectid;
4222                 WARN_ON(byte_in_group > cache->key.offset);
4223
4224                 spin_lock(&cache->space_info->lock);
4225                 spin_lock(&cache->lock);
4226
4227                 if (btrfs_test_opt(root, SPACE_CACHE) &&
4228                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4229                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4230
4231                 cache->dirty = 1;
4232                 old_val = btrfs_block_group_used(&cache->item);
4233                 num_bytes = min(total, cache->key.offset - byte_in_group);
4234                 if (alloc) {
4235                         old_val += num_bytes;
4236                         btrfs_set_block_group_used(&cache->item, old_val);
4237                         cache->reserved -= num_bytes;
4238                         cache->space_info->bytes_reserved -= num_bytes;
4239                         cache->space_info->bytes_used += num_bytes;
4240                         cache->space_info->disk_used += num_bytes * factor;
4241                         spin_unlock(&cache->lock);
4242                         spin_unlock(&cache->space_info->lock);
4243                 } else {
4244                         old_val -= num_bytes;
4245                         btrfs_set_block_group_used(&cache->item, old_val);
4246                         cache->pinned += num_bytes;
4247                         cache->space_info->bytes_pinned += num_bytes;
4248                         cache->space_info->bytes_used -= num_bytes;
4249                         cache->space_info->disk_used -= num_bytes * factor;
4250                         spin_unlock(&cache->lock);
4251                         spin_unlock(&cache->space_info->lock);
4252
4253                         set_extent_dirty(info->pinned_extents,
4254                                          bytenr, bytenr + num_bytes - 1,
4255                                          GFP_NOFS | __GFP_NOFAIL);
4256                 }
4257                 btrfs_put_block_group(cache);
4258                 total -= num_bytes;
4259                 bytenr += num_bytes;
4260         }
4261         return 0;
4262 }
4263
4264 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4265 {
4266         struct btrfs_block_group_cache *cache;
4267         u64 bytenr;
4268
4269         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4270         if (!cache)
4271                 return 0;
4272
4273         bytenr = cache->key.objectid;
4274         btrfs_put_block_group(cache);
4275
4276         return bytenr;
4277 }
4278
4279 static int pin_down_extent(struct btrfs_root *root,
4280                            struct btrfs_block_group_cache *cache,
4281                            u64 bytenr, u64 num_bytes, int reserved)
4282 {
4283         spin_lock(&cache->space_info->lock);
4284         spin_lock(&cache->lock);
4285         cache->pinned += num_bytes;
4286         cache->space_info->bytes_pinned += num_bytes;
4287         if (reserved) {
4288                 cache->reserved -= num_bytes;
4289                 cache->space_info->bytes_reserved -= num_bytes;
4290         }
4291         spin_unlock(&cache->lock);
4292         spin_unlock(&cache->space_info->lock);
4293
4294         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4295                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4296         return 0;
4297 }
4298
4299 /*
4300  * this function must be called within transaction
4301  */
4302 int btrfs_pin_extent(struct btrfs_root *root,
4303                      u64 bytenr, u64 num_bytes, int reserved)
4304 {
4305         struct btrfs_block_group_cache *cache;
4306
4307         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4308         BUG_ON(!cache);
4309
4310         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4311
4312         btrfs_put_block_group(cache);
4313         return 0;
4314 }
4315
4316 /**
4317  * btrfs_update_reserved_bytes - update the block_group and space info counters
4318  * @cache:      The cache we are manipulating
4319  * @num_bytes:  The number of bytes in question
4320  * @reserve:    One of the reservation enums
4321  *
4322  * This is called by the allocator when it reserves space, or by somebody who is
4323  * freeing space that was never actually used on disk.  For example if you
4324  * reserve some space for a new leaf in transaction A and before transaction A
4325  * commits you free that leaf, you call this with reserve set to 0 in order to
4326  * clear the reservation.
4327  *
4328  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4329  * ENOSPC accounting.  For data we handle the reservation through clearing the
4330  * delalloc bits in the io_tree.  We have to do this since we could end up
4331  * allocating less disk space for the amount of data we have reserved in the
4332  * case of compression.
4333  *
4334  * If this is a reservation and the block group has become read only we cannot
4335  * make the reservation and return -EAGAIN, otherwise this function always
4336  * succeeds.
4337  */
4338 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4339                                        u64 num_bytes, int reserve)
4340 {
4341         struct btrfs_space_info *space_info = cache->space_info;
4342         int ret = 0;
4343         spin_lock(&space_info->lock);
4344         spin_lock(&cache->lock);
4345         if (reserve != RESERVE_FREE) {
4346                 if (cache->ro) {
4347                         ret = -EAGAIN;
4348                 } else {
4349                         cache->reserved += num_bytes;
4350                         space_info->bytes_reserved += num_bytes;
4351                         if (reserve == RESERVE_ALLOC) {
4352                                 BUG_ON(space_info->bytes_may_use < num_bytes);
4353                                 space_info->bytes_may_use -= num_bytes;
4354                         }
4355                 }
4356         } else {
4357                 if (cache->ro)
4358                         space_info->bytes_readonly += num_bytes;
4359                 cache->reserved -= num_bytes;
4360                 space_info->bytes_reserved -= num_bytes;
4361                 space_info->reservation_progress++;
4362         }
4363         spin_unlock(&cache->lock);
4364         spin_unlock(&space_info->lock);
4365         return ret;
4366 }
4367
4368 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4369                                 struct btrfs_root *root)
4370 {
4371         struct btrfs_fs_info *fs_info = root->fs_info;
4372         struct btrfs_caching_control *next;
4373         struct btrfs_caching_control *caching_ctl;
4374         struct btrfs_block_group_cache *cache;
4375
4376         down_write(&fs_info->extent_commit_sem);
4377
4378         list_for_each_entry_safe(caching_ctl, next,
4379                                  &fs_info->caching_block_groups, list) {
4380                 cache = caching_ctl->block_group;
4381                 if (block_group_cache_done(cache)) {
4382                         cache->last_byte_to_unpin = (u64)-1;
4383                         list_del_init(&caching_ctl->list);
4384                         put_caching_control(caching_ctl);
4385                 } else {
4386                         cache->last_byte_to_unpin = caching_ctl->progress;
4387                 }
4388         }
4389
4390         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4391                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4392         else
4393                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4394
4395         up_write(&fs_info->extent_commit_sem);
4396
4397         update_global_block_rsv(fs_info);
4398         return 0;
4399 }
4400
4401 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4402 {
4403         struct btrfs_fs_info *fs_info = root->fs_info;
4404         struct btrfs_block_group_cache *cache = NULL;
4405         u64 len;
4406
4407         while (start <= end) {
4408                 if (!cache ||
4409                     start >= cache->key.objectid + cache->key.offset) {
4410                         if (cache)
4411                                 btrfs_put_block_group(cache);
4412                         cache = btrfs_lookup_block_group(fs_info, start);
4413                         BUG_ON(!cache);
4414                 }
4415
4416                 len = cache->key.objectid + cache->key.offset - start;
4417                 len = min(len, end + 1 - start);
4418
4419                 if (start < cache->last_byte_to_unpin) {
4420                         len = min(len, cache->last_byte_to_unpin - start);
4421                         btrfs_add_free_space(cache, start, len);
4422                 }
4423
4424                 start += len;
4425
4426                 spin_lock(&cache->space_info->lock);
4427                 spin_lock(&cache->lock);
4428                 cache->pinned -= len;
4429                 cache->space_info->bytes_pinned -= len;
4430                 if (cache->ro)
4431                         cache->space_info->bytes_readonly += len;
4432                 spin_unlock(&cache->lock);
4433                 spin_unlock(&cache->space_info->lock);
4434         }
4435
4436         if (cache)
4437                 btrfs_put_block_group(cache);
4438         return 0;
4439 }
4440
4441 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4442                                struct btrfs_root *root)
4443 {
4444         struct btrfs_fs_info *fs_info = root->fs_info;
4445         struct extent_io_tree *unpin;
4446         u64 start;
4447         u64 end;
4448         int ret;
4449
4450         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4451                 unpin = &fs_info->freed_extents[1];
4452         else
4453                 unpin = &fs_info->freed_extents[0];
4454
4455         while (1) {
4456                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4457                                             EXTENT_DIRTY);
4458                 if (ret)
4459                         break;
4460
4461                 if (btrfs_test_opt(root, DISCARD))
4462                         ret = btrfs_discard_extent(root, start,
4463                                                    end + 1 - start, NULL);
4464
4465                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4466                 unpin_extent_range(root, start, end);
4467                 cond_resched();
4468         }
4469
4470         return 0;
4471 }
4472
4473 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4474                                 struct btrfs_root *root,
4475                                 u64 bytenr, u64 num_bytes, u64 parent,
4476                                 u64 root_objectid, u64 owner_objectid,
4477                                 u64 owner_offset, int refs_to_drop,
4478                                 struct btrfs_delayed_extent_op *extent_op)
4479 {
4480         struct btrfs_key key;
4481         struct btrfs_path *path;
4482         struct btrfs_fs_info *info = root->fs_info;
4483         struct btrfs_root *extent_root = info->extent_root;
4484         struct extent_buffer *leaf;
4485         struct btrfs_extent_item *ei;
4486         struct btrfs_extent_inline_ref *iref;
4487         int ret;
4488         int is_data;
4489         int extent_slot = 0;
4490         int found_extent = 0;
4491         int num_to_del = 1;
4492         u32 item_size;
4493         u64 refs;
4494
4495         path = btrfs_alloc_path();
4496         if (!path)
4497                 return -ENOMEM;
4498
4499         path->reada = 1;
4500         path->leave_spinning = 1;
4501
4502         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4503         BUG_ON(!is_data && refs_to_drop != 1);
4504
4505         ret = lookup_extent_backref(trans, extent_root, path, &iref,
4506                                     bytenr, num_bytes, parent,
4507                                     root_objectid, owner_objectid,
4508                                     owner_offset);
4509         if (ret == 0) {
4510                 extent_slot = path->slots[0];
4511                 while (extent_slot >= 0) {
4512                         btrfs_item_key_to_cpu(path->nodes[0], &key,
4513                                               extent_slot);
4514                         if (key.objectid != bytenr)
4515                                 break;
4516                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4517                             key.offset == num_bytes) {
4518                                 found_extent = 1;
4519                                 break;
4520                         }
4521                         if (path->slots[0] - extent_slot > 5)
4522                                 break;
4523                         extent_slot--;
4524                 }
4525 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4526                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4527                 if (found_extent && item_size < sizeof(*ei))
4528                         found_extent = 0;
4529 #endif
4530                 if (!found_extent) {
4531                         BUG_ON(iref);
4532                         ret = remove_extent_backref(trans, extent_root, path,
4533                                                     NULL, refs_to_drop,
4534                                                     is_data);
4535                         BUG_ON(ret);
4536                         btrfs_release_path(path);
4537                         path->leave_spinning = 1;
4538
4539                         key.objectid = bytenr;
4540                         key.type = BTRFS_EXTENT_ITEM_KEY;
4541                         key.offset = num_bytes;
4542
4543                         ret = btrfs_search_slot(trans, extent_root,
4544                                                 &key, path, -1, 1);
4545                         if (ret) {
4546                                 printk(KERN_ERR "umm, got %d back from search"
4547                                        ", was looking for %llu\n", ret,
4548                                        (unsigned long long)bytenr);
4549                                 if (ret > 0)
4550                                         btrfs_print_leaf(extent_root,
4551                                                          path->nodes[0]);
4552                         }
4553                         BUG_ON(ret);
4554                         extent_slot = path->slots[0];
4555                 }
4556         } else {
4557                 btrfs_print_leaf(extent_root, path->nodes[0]);
4558                 WARN_ON(1);
4559                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4560                        "parent %llu root %llu  owner %llu offset %llu\n",
4561                        (unsigned long long)bytenr,
4562                        (unsigned long long)parent,
4563                        (unsigned long long)root_objectid,
4564                        (unsigned long long)owner_objectid,
4565                        (unsigned long long)owner_offset);
4566         }
4567
4568         leaf = path->nodes[0];
4569         item_size = btrfs_item_size_nr(leaf, extent_slot);
4570 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4571         if (item_size < sizeof(*ei)) {
4572                 BUG_ON(found_extent || extent_slot != path->slots[0]);
4573                 ret = convert_extent_item_v0(trans, extent_root, path,
4574                                              owner_objectid, 0);
4575                 BUG_ON(ret < 0);
4576
4577                 btrfs_release_path(path);
4578                 path->leave_spinning = 1;
4579
4580                 key.objectid = bytenr;
4581                 key.type = BTRFS_EXTENT_ITEM_KEY;
4582                 key.offset = num_bytes;
4583
4584                 ret = btrfs_search_slot(trans, extent_root, &key, path,
4585                                         -1, 1);
4586                 if (ret) {
4587                         printk(KERN_ERR "umm, got %d back from search"
4588                                ", was looking for %llu\n", ret,
4589                                (unsigned long long)bytenr);
4590                         btrfs_print_leaf(extent_root, path->nodes[0]);
4591                 }
4592                 BUG_ON(ret);
4593                 extent_slot = path->slots[0];
4594                 leaf = path->nodes[0];
4595                 item_size = btrfs_item_size_nr(leaf, extent_slot);
4596         }
4597 #endif
4598         BUG_ON(item_size < sizeof(*ei));
4599         ei = btrfs_item_ptr(leaf, extent_slot,
4600                             struct btrfs_extent_item);
4601         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4602                 struct btrfs_tree_block_info *bi;
4603                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4604                 bi = (struct btrfs_tree_block_info *)(ei + 1);
4605                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4606         }
4607
4608         refs = btrfs_extent_refs(leaf, ei);
4609         BUG_ON(refs < refs_to_drop);
4610         refs -= refs_to_drop;
4611
4612         if (refs > 0) {
4613                 if (extent_op)
4614                         __run_delayed_extent_op(extent_op, leaf, ei);
4615                 /*
4616                  * In the case of inline back ref, reference count will
4617                  * be updated by remove_extent_backref
4618                  */
4619                 if (iref) {
4620                         BUG_ON(!found_extent);
4621                 } else {
4622                         btrfs_set_extent_refs(leaf, ei, refs);
4623                         btrfs_mark_buffer_dirty(leaf);
4624                 }
4625                 if (found_extent) {
4626                         ret = remove_extent_backref(trans, extent_root, path,
4627                                                     iref, refs_to_drop,
4628                                                     is_data);
4629                         BUG_ON(ret);
4630                 }
4631         } else {
4632                 if (found_extent) {
4633                         BUG_ON(is_data && refs_to_drop !=
4634                                extent_data_ref_count(root, path, iref));
4635                         if (iref) {
4636                                 BUG_ON(path->slots[0] != extent_slot);
4637                         } else {
4638                                 BUG_ON(path->slots[0] != extent_slot + 1);
4639                                 path->slots[0] = extent_slot;
4640                                 num_to_del = 2;
4641                         }
4642                 }
4643
4644                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4645                                       num_to_del);
4646                 BUG_ON(ret);
4647                 btrfs_release_path(path);
4648
4649                 if (is_data) {
4650                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4651                         BUG_ON(ret);
4652                 } else {
4653                         invalidate_mapping_pages(info->btree_inode->i_mapping,
4654                              bytenr >> PAGE_CACHE_SHIFT,
4655                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4656                 }
4657
4658                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4659                 BUG_ON(ret);
4660         }
4661         btrfs_free_path(path);
4662         return ret;
4663 }
4664
4665 /*
4666  * when we free an block, it is possible (and likely) that we free the last
4667  * delayed ref for that extent as well.  This searches the delayed ref tree for
4668  * a given extent, and if there are no other delayed refs to be processed, it
4669  * removes it from the tree.
4670  */
4671 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4672                                       struct btrfs_root *root, u64 bytenr)
4673 {
4674         struct btrfs_delayed_ref_head *head;
4675         struct btrfs_delayed_ref_root *delayed_refs;
4676         struct btrfs_delayed_ref_node *ref;
4677         struct rb_node *node;
4678         int ret = 0;
4679
4680         delayed_refs = &trans->transaction->delayed_refs;
4681         spin_lock(&delayed_refs->lock);
4682         head = btrfs_find_delayed_ref_head(trans, bytenr);
4683         if (!head)
4684                 goto out;
4685
4686         node = rb_prev(&head->node.rb_node);
4687         if (!node)
4688                 goto out;
4689
4690         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4691
4692         /* there are still entries for this ref, we can't drop it */
4693         if (ref->bytenr == bytenr)
4694                 goto out;
4695
4696         if (head->extent_op) {
4697                 if (!head->must_insert_reserved)
4698                         goto out;
4699                 kfree(head->extent_op);
4700                 head->extent_op = NULL;
4701         }
4702
4703         /*
4704          * waiting for the lock here would deadlock.  If someone else has it
4705          * locked they are already in the process of dropping it anyway
4706          */
4707         if (!mutex_trylock(&head->mutex))
4708                 goto out;
4709
4710         /*
4711          * at this point we have a head with no other entries.  Go
4712          * ahead and process it.
4713          */
4714         head->node.in_tree = 0;
4715         rb_erase(&head->node.rb_node, &delayed_refs->root);
4716
4717         delayed_refs->num_entries--;
4718
4719         /*
4720          * we don't take a ref on the node because we're removing it from the
4721          * tree, so we just steal the ref the tree was holding.
4722          */
4723         delayed_refs->num_heads--;
4724         if (list_empty(&head->cluster))
4725                 delayed_refs->num_heads_ready--;
4726
4727         list_del_init(&head->cluster);
4728         spin_unlock(&delayed_refs->lock);
4729
4730         BUG_ON(head->extent_op);
4731         if (head->must_insert_reserved)
4732                 ret = 1;
4733
4734         mutex_unlock(&head->mutex);
4735         btrfs_put_delayed_ref(&head->node);
4736         return ret;
4737 out:
4738         spin_unlock(&delayed_refs->lock);
4739         return 0;
4740 }
4741
4742 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4743                            struct btrfs_root *root,
4744                            struct extent_buffer *buf,
4745                            u64 parent, int last_ref)
4746 {
4747         struct btrfs_block_group_cache *cache = NULL;
4748         int ret;
4749
4750         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4751                 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4752                                                 parent, root->root_key.objectid,
4753                                                 btrfs_header_level(buf),
4754                                                 BTRFS_DROP_DELAYED_REF, NULL);
4755                 BUG_ON(ret);
4756         }
4757
4758         if (!last_ref)
4759                 return;
4760
4761         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4762
4763         if (btrfs_header_generation(buf) == trans->transid) {
4764                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4765                         ret = check_ref_cleanup(trans, root, buf->start);
4766                         if (!ret)
4767                                 goto out;
4768                 }
4769
4770                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4771                         pin_down_extent(root, cache, buf->start, buf->len, 1);
4772                         goto out;
4773                 }
4774
4775                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4776
4777                 btrfs_add_free_space(cache, buf->start, buf->len);
4778                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
4779         }
4780 out:
4781         /*
4782          * Deleting the buffer, clear the corrupt flag since it doesn't matter
4783          * anymore.
4784          */
4785         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
4786         btrfs_put_block_group(cache);
4787 }
4788
4789 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4790                       struct btrfs_root *root,
4791                       u64 bytenr, u64 num_bytes, u64 parent,
4792                       u64 root_objectid, u64 owner, u64 offset)
4793 {
4794         int ret;
4795
4796         /*
4797          * tree log blocks never actually go into the extent allocation
4798          * tree, just update pinning info and exit early.
4799          */
4800         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4801                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4802                 /* unlocks the pinned mutex */
4803                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4804                 ret = 0;
4805         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4806                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4807                                         parent, root_objectid, (int)owner,
4808                                         BTRFS_DROP_DELAYED_REF, NULL);
4809                 BUG_ON(ret);
4810         } else {
4811                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4812                                         parent, root_objectid, owner,
4813                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
4814                 BUG_ON(ret);
4815         }
4816         return ret;
4817 }
4818
4819 static u64 stripe_align(struct btrfs_root *root, u64 val)
4820 {
4821         u64 mask = ((u64)root->stripesize - 1);
4822         u64 ret = (val + mask) & ~mask;
4823         return ret;
4824 }
4825
4826 /*
4827  * when we wait for progress in the block group caching, its because
4828  * our allocation attempt failed at least once.  So, we must sleep
4829  * and let some progress happen before we try again.
4830  *
4831  * This function will sleep at least once waiting for new free space to
4832  * show up, and then it will check the block group free space numbers
4833  * for our min num_bytes.  Another option is to have it go ahead
4834  * and look in the rbtree for a free extent of a given size, but this
4835  * is a good start.
4836  */
4837 static noinline int
4838 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4839                                 u64 num_bytes)
4840 {
4841         struct btrfs_caching_control *caching_ctl;
4842         DEFINE_WAIT(wait);
4843
4844         caching_ctl = get_caching_control(cache);
4845         if (!caching_ctl)
4846                 return 0;
4847
4848         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4849                    (cache->free_space_ctl->free_space >= num_bytes));
4850
4851         put_caching_control(caching_ctl);
4852         return 0;
4853 }
4854
4855 static noinline int
4856 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4857 {
4858         struct btrfs_caching_control *caching_ctl;
4859         DEFINE_WAIT(wait);
4860
4861         caching_ctl = get_caching_control(cache);
4862         if (!caching_ctl)
4863                 return 0;
4864
4865         wait_event(caching_ctl->wait, block_group_cache_done(cache));
4866
4867         put_caching_control(caching_ctl);
4868         return 0;
4869 }
4870
4871 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4872 {
4873         int index;
4874         if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4875                 index = 0;
4876         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4877                 index = 1;
4878         else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4879                 index = 2;
4880         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4881                 index = 3;
4882         else
4883                 index = 4;
4884         return index;
4885 }
4886
4887 enum btrfs_loop_type {
4888         LOOP_FIND_IDEAL = 0,
4889         LOOP_CACHING_NOWAIT = 1,
4890         LOOP_CACHING_WAIT = 2,
4891         LOOP_ALLOC_CHUNK = 3,
4892         LOOP_NO_EMPTY_SIZE = 4,
4893 };
4894
4895 /*
4896  * walks the btree of allocated extents and find a hole of a given size.
4897  * The key ins is changed to record the hole:
4898  * ins->objectid == block start
4899  * ins->flags = BTRFS_EXTENT_ITEM_KEY
4900  * ins->offset == number of blocks
4901  * Any available blocks before search_start are skipped.
4902  */
4903 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4904                                      struct btrfs_root *orig_root,
4905                                      u64 num_bytes, u64 empty_size,
4906                                      u64 search_start, u64 search_end,
4907                                      u64 hint_byte, struct btrfs_key *ins,
4908                                      u64 data)
4909 {
4910         int ret = 0;
4911         struct btrfs_root *root = orig_root->fs_info->extent_root;
4912         struct btrfs_free_cluster *last_ptr = NULL;
4913         struct btrfs_block_group_cache *block_group = NULL;
4914         int empty_cluster = 2 * 1024 * 1024;
4915         int allowed_chunk_alloc = 0;
4916         int done_chunk_alloc = 0;
4917         struct btrfs_space_info *space_info;
4918         int last_ptr_loop = 0;
4919         int loop = 0;
4920         int index = 0;
4921         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
4922                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
4923         bool found_uncached_bg = false;
4924         bool failed_cluster_refill = false;
4925         bool failed_alloc = false;
4926         bool use_cluster = true;
4927         u64 ideal_cache_percent = 0;
4928         u64 ideal_cache_offset = 0;
4929
4930         WARN_ON(num_bytes < root->sectorsize);
4931         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4932         ins->objectid = 0;
4933         ins->offset = 0;
4934
4935         space_info = __find_space_info(root->fs_info, data);
4936         if (!space_info) {
4937                 printk(KERN_ERR "No space info for %llu\n", data);
4938                 return -ENOSPC;
4939         }
4940
4941         /*
4942          * If the space info is for both data and metadata it means we have a
4943          * small filesystem and we can't use the clustering stuff.
4944          */
4945         if (btrfs_mixed_space_info(space_info))
4946                 use_cluster = false;
4947
4948         if (orig_root->ref_cows || empty_size)
4949                 allowed_chunk_alloc = 1;
4950
4951         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
4952                 last_ptr = &root->fs_info->meta_alloc_cluster;
4953                 if (!btrfs_test_opt(root, SSD))
4954                         empty_cluster = 64 * 1024;
4955         }
4956
4957         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
4958             btrfs_test_opt(root, SSD)) {
4959                 last_ptr = &root->fs_info->data_alloc_cluster;
4960         }
4961
4962         if (last_ptr) {
4963                 spin_lock(&last_ptr->lock);
4964                 if (last_ptr->block_group)
4965                         hint_byte = last_ptr->window_start;
4966                 spin_unlock(&last_ptr->lock);
4967         }
4968
4969         search_start = max(search_start, first_logical_byte(root, 0));
4970         search_start = max(search_start, hint_byte);
4971
4972         if (!last_ptr)
4973                 empty_cluster = 0;
4974
4975         if (search_start == hint_byte) {
4976 ideal_cache:
4977                 block_group = btrfs_lookup_block_group(root->fs_info,
4978                                                        search_start);
4979                 /*
4980                  * we don't want to use the block group if it doesn't match our
4981                  * allocation bits, or if its not cached.
4982                  *
4983                  * However if we are re-searching with an ideal block group
4984                  * picked out then we don't care that the block group is cached.
4985                  */
4986                 if (block_group && block_group_bits(block_group, data) &&
4987                     (block_group->cached != BTRFS_CACHE_NO ||
4988                      search_start == ideal_cache_offset)) {
4989                         down_read(&space_info->groups_sem);
4990                         if (list_empty(&block_group->list) ||
4991                             block_group->ro) {
4992                                 /*
4993                                  * someone is removing this block group,
4994                                  * we can't jump into the have_block_group
4995                                  * target because our list pointers are not
4996                                  * valid
4997                                  */
4998                                 btrfs_put_block_group(block_group);
4999                                 up_read(&space_info->groups_sem);
5000                         } else {
5001                                 index = get_block_group_index(block_group);
5002                                 goto have_block_group;
5003                         }
5004                 } else if (block_group) {
5005                         btrfs_put_block_group(block_group);
5006                 }
5007         }
5008 search:
5009         down_read(&space_info->groups_sem);
5010         list_for_each_entry(block_group, &space_info->block_groups[index],
5011                             list) {
5012                 u64 offset;
5013                 int cached;
5014
5015                 btrfs_get_block_group(block_group);
5016                 search_start = block_group->key.objectid;
5017
5018                 /*
5019                  * this can happen if we end up cycling through all the
5020                  * raid types, but we want to make sure we only allocate
5021                  * for the proper type.
5022                  */
5023                 if (!block_group_bits(block_group, data)) {
5024                     u64 extra = BTRFS_BLOCK_GROUP_DUP |
5025                                 BTRFS_BLOCK_GROUP_RAID1 |
5026                                 BTRFS_BLOCK_GROUP_RAID10;
5027
5028                         /*
5029                          * if they asked for extra copies and this block group
5030                          * doesn't provide them, bail.  This does allow us to
5031                          * fill raid0 from raid1.
5032                          */
5033                         if ((data & extra) && !(block_group->flags & extra))
5034                                 goto loop;
5035                 }
5036
5037 have_block_group:
5038                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
5039                         u64 free_percent;
5040
5041                         ret = cache_block_group(block_group, trans,
5042                                                 orig_root, 1);
5043                         if (block_group->cached == BTRFS_CACHE_FINISHED)
5044                                 goto have_block_group;
5045
5046                         free_percent = btrfs_block_group_used(&block_group->item);
5047                         free_percent *= 100;
5048                         free_percent = div64_u64(free_percent,
5049                                                  block_group->key.offset);
5050                         free_percent = 100 - free_percent;
5051                         if (free_percent > ideal_cache_percent &&
5052                             likely(!block_group->ro)) {
5053                                 ideal_cache_offset = block_group->key.objectid;
5054                                 ideal_cache_percent = free_percent;
5055                         }
5056
5057                         /*
5058                          * The caching workers are limited to 2 threads, so we
5059                          * can queue as much work as we care to.
5060                          */
5061                         if (loop > LOOP_FIND_IDEAL) {
5062                                 ret = cache_block_group(block_group, trans,
5063                                                         orig_root, 0);
5064                                 BUG_ON(ret);
5065                         }
5066                         found_uncached_bg = true;
5067
5068                         /*
5069                          * If loop is set for cached only, try the next block
5070                          * group.
5071                          */
5072                         if (loop == LOOP_FIND_IDEAL)
5073                                 goto loop;
5074                 }
5075
5076                 cached = block_group_cache_done(block_group);
5077                 if (unlikely(!cached))
5078                         found_uncached_bg = true;
5079
5080                 if (unlikely(block_group->ro))
5081                         goto loop;
5082
5083                 spin_lock(&block_group->free_space_ctl->tree_lock);
5084                 if (cached &&
5085                     block_group->free_space_ctl->free_space <
5086                     num_bytes + empty_size) {
5087                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5088                         goto loop;
5089                 }
5090                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5091
5092                 /*
5093                  * Ok we want to try and use the cluster allocator, so lets look
5094                  * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
5095                  * have tried the cluster allocator plenty of times at this
5096                  * point and not have found anything, so we are likely way too
5097                  * fragmented for the clustering stuff to find anything, so lets
5098                  * just skip it and let the allocator find whatever block it can
5099                  * find
5100                  */
5101                 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
5102                         /*
5103                          * the refill lock keeps out other
5104                          * people trying to start a new cluster
5105                          */
5106                         spin_lock(&last_ptr->refill_lock);
5107                         if (last_ptr->block_group &&
5108                             (last_ptr->block_group->ro ||
5109                             !block_group_bits(last_ptr->block_group, data))) {
5110                                 offset = 0;
5111                                 goto refill_cluster;
5112                         }
5113
5114                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
5115                                                  num_bytes, search_start);
5116                         if (offset) {
5117                                 /* we have a block, we're done */
5118                                 spin_unlock(&last_ptr->refill_lock);
5119                                 goto checks;
5120                         }
5121
5122                         spin_lock(&last_ptr->lock);
5123                         /*
5124                          * whoops, this cluster doesn't actually point to
5125                          * this block group.  Get a ref on the block
5126                          * group is does point to and try again
5127                          */
5128                         if (!last_ptr_loop && last_ptr->block_group &&
5129                             last_ptr->block_group != block_group &&
5130                             index <=
5131                                  get_block_group_index(last_ptr->block_group)) {
5132
5133                                 btrfs_put_block_group(block_group);
5134                                 block_group = last_ptr->block_group;
5135                                 btrfs_get_block_group(block_group);
5136                                 spin_unlock(&last_ptr->lock);
5137                                 spin_unlock(&last_ptr->refill_lock);
5138
5139                                 last_ptr_loop = 1;
5140                                 search_start = block_group->key.objectid;
5141                                 /*
5142                                  * we know this block group is properly
5143                                  * in the list because
5144                                  * btrfs_remove_block_group, drops the
5145                                  * cluster before it removes the block
5146                                  * group from the list
5147                                  */
5148                                 goto have_block_group;
5149                         }
5150                         spin_unlock(&last_ptr->lock);
5151 refill_cluster:
5152                         /*
5153                          * this cluster didn't work out, free it and
5154                          * start over
5155                          */
5156                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5157
5158                         last_ptr_loop = 0;
5159
5160                         /* allocate a cluster in this block group */
5161                         ret = btrfs_find_space_cluster(trans, root,
5162                                                block_group, last_ptr,
5163                                                offset, num_bytes,
5164                                                empty_cluster + empty_size);
5165                         if (ret == 0) {
5166                                 /*
5167                                  * now pull our allocation out of this
5168                                  * cluster
5169                                  */
5170                                 offset = btrfs_alloc_from_cluster(block_group,
5171                                                   last_ptr, num_bytes,
5172                                                   search_start);
5173                                 if (offset) {
5174                                         /* we found one, proceed */
5175                                         spin_unlock(&last_ptr->refill_lock);
5176                                         goto checks;
5177                                 }
5178                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
5179                                    && !failed_cluster_refill) {
5180                                 spin_unlock(&last_ptr->refill_lock);
5181
5182                                 failed_cluster_refill = true;
5183                                 wait_block_group_cache_progress(block_group,
5184                                        num_bytes + empty_cluster + empty_size);
5185                                 goto have_block_group;
5186                         }
5187
5188                         /*
5189                          * at this point we either didn't find a cluster
5190                          * or we weren't able to allocate a block from our
5191                          * cluster.  Free the cluster we've been trying
5192                          * to use, and go to the next block group
5193                          */
5194                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5195                         spin_unlock(&last_ptr->refill_lock);
5196                         goto loop;
5197                 }
5198
5199                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5200                                                     num_bytes, empty_size);
5201                 /*
5202                  * If we didn't find a chunk, and we haven't failed on this
5203                  * block group before, and this block group is in the middle of
5204                  * caching and we are ok with waiting, then go ahead and wait
5205                  * for progress to be made, and set failed_alloc to true.
5206                  *
5207                  * If failed_alloc is true then we've already waited on this
5208                  * block group once and should move on to the next block group.
5209                  */
5210                 if (!offset && !failed_alloc && !cached &&
5211                     loop > LOOP_CACHING_NOWAIT) {
5212                         wait_block_group_cache_progress(block_group,
5213                                                 num_bytes + empty_size);
5214                         failed_alloc = true;
5215                         goto have_block_group;
5216                 } else if (!offset) {
5217                         goto loop;
5218                 }
5219 checks:
5220                 search_start = stripe_align(root, offset);
5221                 /* move on to the next group */
5222                 if (search_start + num_bytes >= search_end) {
5223                         btrfs_add_free_space(block_group, offset, num_bytes);
5224                         goto loop;
5225                 }
5226
5227                 /* move on to the next group */
5228                 if (search_start + num_bytes >
5229                     block_group->key.objectid + block_group->key.offset) {
5230                         btrfs_add_free_space(block_group, offset, num_bytes);
5231                         goto loop;
5232                 }
5233
5234                 ins->objectid = search_start;
5235                 ins->offset = num_bytes;
5236
5237                 if (offset < search_start)
5238                         btrfs_add_free_space(block_group, offset,
5239                                              search_start - offset);
5240                 BUG_ON(offset > search_start);
5241
5242                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
5243                                                   alloc_type);
5244                 if (ret == -EAGAIN) {
5245                         btrfs_add_free_space(block_group, offset, num_bytes);
5246                         goto loop;
5247                 }
5248
5249                 /* we are all good, lets return */
5250                 ins->objectid = search_start;
5251                 ins->offset = num_bytes;
5252
5253                 if (offset < search_start)
5254                         btrfs_add_free_space(block_group, offset,
5255                                              search_start - offset);
5256                 BUG_ON(offset > search_start);
5257                 btrfs_put_block_group(block_group);
5258                 break;
5259 loop:
5260                 failed_cluster_refill = false;
5261                 failed_alloc = false;
5262                 BUG_ON(index != get_block_group_index(block_group));
5263                 btrfs_put_block_group(block_group);
5264         }
5265         up_read(&space_info->groups_sem);
5266
5267         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5268                 goto search;
5269
5270         /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
5271          *                      for them to make caching progress.  Also
5272          *                      determine the best possible bg to cache
5273          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5274          *                      caching kthreads as we move along
5275          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5276          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5277          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5278          *                      again
5279          */
5280         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5281                 index = 0;
5282                 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5283                         found_uncached_bg = false;
5284                         loop++;
5285                         if (!ideal_cache_percent)
5286                                 goto search;
5287
5288                         /*
5289                          * 1 of the following 2 things have happened so far
5290                          *
5291                          * 1) We found an ideal block group for caching that
5292                          * is mostly full and will cache quickly, so we might
5293                          * as well wait for it.
5294                          *
5295                          * 2) We searched for cached only and we didn't find
5296                          * anything, and we didn't start any caching kthreads
5297                          * either, so chances are we will loop through and
5298                          * start a couple caching kthreads, and then come back
5299                          * around and just wait for them.  This will be slower
5300                          * because we will have 2 caching kthreads reading at
5301                          * the same time when we could have just started one
5302                          * and waited for it to get far enough to give us an
5303                          * allocation, so go ahead and go to the wait caching
5304                          * loop.
5305                          */
5306                         loop = LOOP_CACHING_WAIT;
5307                         search_start = ideal_cache_offset;
5308                         ideal_cache_percent = 0;
5309                         goto ideal_cache;
5310                 } else if (loop == LOOP_FIND_IDEAL) {
5311                         /*
5312                          * Didn't find a uncached bg, wait on anything we find
5313                          * next.
5314                          */
5315                         loop = LOOP_CACHING_WAIT;
5316                         goto search;
5317                 }
5318
5319                 loop++;
5320
5321                 if (loop == LOOP_ALLOC_CHUNK) {
5322                        if (allowed_chunk_alloc) {
5323                                 ret = do_chunk_alloc(trans, root, num_bytes +
5324                                                      2 * 1024 * 1024, data,
5325                                                      CHUNK_ALLOC_LIMITED);
5326                                 allowed_chunk_alloc = 0;
5327                                 if (ret == 1)
5328                                         done_chunk_alloc = 1;
5329                         } else if (!done_chunk_alloc &&
5330                                    space_info->force_alloc ==
5331                                    CHUNK_ALLOC_NO_FORCE) {
5332                                 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5333                         }
5334
5335                        /*
5336                         * We didn't allocate a chunk, go ahead and drop the
5337                         * empty size and loop again.
5338                         */
5339                        if (!done_chunk_alloc)
5340                                loop = LOOP_NO_EMPTY_SIZE;
5341                 }
5342
5343                 if (loop == LOOP_NO_EMPTY_SIZE) {
5344                         empty_size = 0;
5345                         empty_cluster = 0;
5346                 }
5347
5348                 goto search;
5349         } else if (!ins->objectid) {
5350                 ret = -ENOSPC;
5351         } else if (ins->objectid) {
5352                 ret = 0;
5353         }
5354
5355         return ret;
5356 }
5357
5358 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5359                             int dump_block_groups)
5360 {
5361         struct btrfs_block_group_cache *cache;
5362         int index = 0;
5363
5364         spin_lock(&info->lock);
5365         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5366                (unsigned long long)info->flags,
5367                (unsigned long long)(info->total_bytes - info->bytes_used -
5368                                     info->bytes_pinned - info->bytes_reserved -
5369                                     info->bytes_readonly),
5370                (info->full) ? "" : "not ");
5371         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5372                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5373                (unsigned long long)info->total_bytes,
5374                (unsigned long long)info->bytes_used,
5375                (unsigned long long)info->bytes_pinned,
5376                (unsigned long long)info->bytes_reserved,
5377                (unsigned long long)info->bytes_may_use,
5378                (unsigned long long)info->bytes_readonly);
5379         spin_unlock(&info->lock);
5380
5381         if (!dump_block_groups)
5382                 return;
5383
5384         down_read(&info->groups_sem);
5385 again:
5386         list_for_each_entry(cache, &info->block_groups[index], list) {
5387                 spin_lock(&cache->lock);
5388                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5389                        "%llu pinned %llu reserved\n",
5390                        (unsigned long long)cache->key.objectid,
5391                        (unsigned long long)cache->key.offset,
5392                        (unsigned long long)btrfs_block_group_used(&cache->item),
5393                        (unsigned long long)cache->pinned,
5394                        (unsigned long long)cache->reserved);
5395                 btrfs_dump_free_space(cache, bytes);
5396                 spin_unlock(&cache->lock);
5397         }
5398         if (++index < BTRFS_NR_RAID_TYPES)
5399                 goto again;
5400         up_read(&info->groups_sem);
5401 }
5402
5403 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5404                          struct btrfs_root *root,
5405                          u64 num_bytes, u64 min_alloc_size,
5406                          u64 empty_size, u64 hint_byte,
5407                          u64 search_end, struct btrfs_key *ins,
5408                          u64 data)
5409 {
5410         int ret;
5411         u64 search_start = 0;
5412
5413         data = btrfs_get_alloc_profile(root, data);
5414 again:
5415         /*
5416          * the only place that sets empty_size is btrfs_realloc_node, which
5417          * is not called recursively on allocations
5418          */
5419         if (empty_size || root->ref_cows)
5420                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5421                                      num_bytes + 2 * 1024 * 1024, data,
5422                                      CHUNK_ALLOC_NO_FORCE);
5423
5424         WARN_ON(num_bytes < root->sectorsize);
5425         ret = find_free_extent(trans, root, num_bytes, empty_size,
5426                                search_start, search_end, hint_byte,
5427                                ins, data);
5428
5429         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5430                 num_bytes = num_bytes >> 1;
5431                 num_bytes = num_bytes & ~(root->sectorsize - 1);
5432                 num_bytes = max(num_bytes, min_alloc_size);
5433                 do_chunk_alloc(trans, root->fs_info->extent_root,
5434                                num_bytes, data, CHUNK_ALLOC_FORCE);
5435                 goto again;
5436         }
5437         if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
5438                 struct btrfs_space_info *sinfo;
5439
5440                 sinfo = __find_space_info(root->fs_info, data);
5441                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5442                        "wanted %llu\n", (unsigned long long)data,
5443                        (unsigned long long)num_bytes);
5444                 dump_space_info(sinfo, num_bytes, 1);
5445         }
5446
5447         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5448
5449         return ret;
5450 }
5451
5452 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5453 {
5454         struct btrfs_block_group_cache *cache;
5455         int ret = 0;
5456
5457         cache = btrfs_lookup_block_group(root->fs_info, start);
5458         if (!cache) {
5459                 printk(KERN_ERR "Unable to find block group for %llu\n",
5460                        (unsigned long long)start);
5461                 return -ENOSPC;
5462         }
5463
5464         if (btrfs_test_opt(root, DISCARD))
5465                 ret = btrfs_discard_extent(root, start, len, NULL);
5466
5467         btrfs_add_free_space(cache, start, len);
5468         btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
5469         btrfs_put_block_group(cache);
5470
5471         trace_btrfs_reserved_extent_free(root, start, len);
5472
5473         return ret;
5474 }
5475
5476 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5477                                       struct btrfs_root *root,
5478                                       u64 parent, u64 root_objectid,
5479                                       u64 flags, u64 owner, u64 offset,
5480                                       struct btrfs_key *ins, int ref_mod)
5481 {
5482         int ret;
5483         struct btrfs_fs_info *fs_info = root->fs_info;
5484         struct btrfs_extent_item *extent_item;
5485         struct btrfs_extent_inline_ref *iref;
5486         struct btrfs_path *path;
5487         struct extent_buffer *leaf;
5488         int type;
5489         u32 size;
5490
5491         if (parent > 0)
5492                 type = BTRFS_SHARED_DATA_REF_KEY;
5493         else
5494                 type = BTRFS_EXTENT_DATA_REF_KEY;
5495
5496         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5497
5498         path = btrfs_alloc_path();
5499         if (!path)
5500                 return -ENOMEM;
5501
5502         path->leave_spinning = 1;
5503         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5504                                       ins, size);
5505         BUG_ON(ret);
5506
5507         leaf = path->nodes[0];
5508         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5509                                      struct btrfs_extent_item);
5510         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5511         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5512         btrfs_set_extent_flags(leaf, extent_item,
5513                                flags | BTRFS_EXTENT_FLAG_DATA);
5514
5515         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5516         btrfs_set_extent_inline_ref_type(leaf, iref, type);
5517         if (parent > 0) {
5518                 struct btrfs_shared_data_ref *ref;
5519                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5520                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5521                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5522         } else {
5523                 struct btrfs_extent_data_ref *ref;
5524                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5525                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5526                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5527                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5528                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5529         }
5530
5531         btrfs_mark_buffer_dirty(path->nodes[0]);
5532         btrfs_free_path(path);
5533
5534         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5535         if (ret) {
5536                 printk(KERN_ERR "btrfs update block group failed for %llu "
5537                        "%llu\n", (unsigned long long)ins->objectid,
5538                        (unsigned long long)ins->offset);
5539                 BUG();
5540         }
5541         return ret;
5542 }
5543
5544 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5545                                      struct btrfs_root *root,
5546                                      u64 parent, u64 root_objectid,
5547                                      u64 flags, struct btrfs_disk_key *key,
5548                                      int level, struct btrfs_key *ins)
5549 {
5550         int ret;
5551         struct btrfs_fs_info *fs_info = root->fs_info;
5552         struct btrfs_extent_item *extent_item;
5553         struct btrfs_tree_block_info *block_info;
5554         struct btrfs_extent_inline_ref *iref;
5555         struct btrfs_path *path;
5556         struct extent_buffer *leaf;
5557         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5558
5559         path = btrfs_alloc_path();
5560         if (!path)
5561                 return -ENOMEM;
5562
5563         path->leave_spinning = 1;
5564         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5565                                       ins, size);
5566         BUG_ON(ret);
5567
5568         leaf = path->nodes[0];
5569         extent_item = btrfs_item_ptr(leaf, path->slots[0],
5570                                      struct btrfs_extent_item);
5571         btrfs_set_extent_refs(leaf, extent_item, 1);
5572         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5573         btrfs_set_extent_flags(leaf, extent_item,
5574                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5575         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5576
5577         btrfs_set_tree_block_key(leaf, block_info, key);
5578         btrfs_set_tree_block_level(leaf, block_info, level);
5579
5580         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5581         if (parent > 0) {
5582                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5583                 btrfs_set_extent_inline_ref_type(leaf, iref,
5584                                                  BTRFS_SHARED_BLOCK_REF_KEY);
5585                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5586         } else {
5587                 btrfs_set_extent_inline_ref_type(leaf, iref,
5588                                                  BTRFS_TREE_BLOCK_REF_KEY);
5589                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5590         }
5591
5592         btrfs_mark_buffer_dirty(leaf);
5593         btrfs_free_path(path);
5594
5595         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5596         if (ret) {
5597                 printk(KERN_ERR "btrfs update block group failed for %llu "
5598                        "%llu\n", (unsigned long long)ins->objectid,
5599                        (unsigned long long)ins->offset);
5600                 BUG();
5601         }
5602         return ret;
5603 }
5604
5605 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5606                                      struct btrfs_root *root,
5607                                      u64 root_objectid, u64 owner,
5608                                      u64 offset, struct btrfs_key *ins)
5609 {
5610         int ret;
5611
5612         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5613
5614         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5615                                          0, root_objectid, owner, offset,
5616                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
5617         return ret;
5618 }
5619
5620 /*
5621  * this is used by the tree logging recovery code.  It records that
5622  * an extent has been allocated and makes sure to clear the free
5623  * space cache bits as well
5624  */
5625 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5626                                    struct btrfs_root *root,
5627                                    u64 root_objectid, u64 owner, u64 offset,
5628                                    struct btrfs_key *ins)
5629 {
5630         int ret;
5631         struct btrfs_block_group_cache *block_group;
5632         struct btrfs_caching_control *caching_ctl;
5633         u64 start = ins->objectid;
5634         u64 num_bytes = ins->offset;
5635
5636         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5637         cache_block_group(block_group, trans, NULL, 0);
5638         caching_ctl = get_caching_control(block_group);
5639
5640         if (!caching_ctl) {
5641                 BUG_ON(!block_group_cache_done(block_group));
5642                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5643                 BUG_ON(ret);
5644         } else {
5645                 mutex_lock(&caching_ctl->mutex);
5646
5647                 if (start >= caching_ctl->progress) {
5648                         ret = add_excluded_extent(root, start, num_bytes);
5649                         BUG_ON(ret);
5650                 } else if (start + num_bytes <= caching_ctl->progress) {
5651                         ret = btrfs_remove_free_space(block_group,
5652                                                       start, num_bytes);
5653                         BUG_ON(ret);
5654                 } else {
5655                         num_bytes = caching_ctl->progress - start;
5656                         ret = btrfs_remove_free_space(block_group,
5657                                                       start, num_bytes);
5658                         BUG_ON(ret);
5659
5660                         start = caching_ctl->progress;
5661                         num_bytes = ins->objectid + ins->offset -
5662                                     caching_ctl->progress;
5663                         ret = add_excluded_extent(root, start, num_bytes);
5664                         BUG_ON(ret);
5665                 }
5666
5667                 mutex_unlock(&caching_ctl->mutex);
5668                 put_caching_control(caching_ctl);
5669         }
5670
5671         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
5672                                           RESERVE_ALLOC_NO_ACCOUNT);
5673         BUG_ON(ret);
5674         btrfs_put_block_group(block_group);
5675         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5676                                          0, owner, offset, ins, 1);
5677         return ret;
5678 }
5679
5680 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5681                                             struct btrfs_root *root,
5682                                             u64 bytenr, u32 blocksize,
5683                                             int level)
5684 {
5685         struct extent_buffer *buf;
5686
5687         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5688         if (!buf)
5689                 return ERR_PTR(-ENOMEM);
5690         btrfs_set_header_generation(buf, trans->transid);
5691         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
5692         btrfs_tree_lock(buf);
5693         clean_tree_block(trans, root, buf);
5694
5695         btrfs_set_lock_blocking(buf);
5696         btrfs_set_buffer_uptodate(buf);
5697
5698         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5699                 /*
5700                  * we allow two log transactions at a time, use different
5701                  * EXENT bit to differentiate dirty pages.
5702                  */
5703                 if (root->log_transid % 2 == 0)
5704                         set_extent_dirty(&root->dirty_log_pages, buf->start,
5705                                         buf->start + buf->len - 1, GFP_NOFS);
5706                 else
5707                         set_extent_new(&root->dirty_log_pages, buf->start,
5708                                         buf->start + buf->len - 1, GFP_NOFS);
5709         } else {
5710                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5711                          buf->start + buf->len - 1, GFP_NOFS);
5712         }
5713         trans->blocks_used++;
5714         /* this returns a buffer locked for blocking */
5715         return buf;
5716 }
5717
5718 static struct btrfs_block_rsv *
5719 use_block_rsv(struct btrfs_trans_handle *trans,
5720               struct btrfs_root *root, u32 blocksize)
5721 {
5722         struct btrfs_block_rsv *block_rsv;
5723         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5724         int ret;
5725
5726         block_rsv = get_block_rsv(trans, root);
5727
5728         if (block_rsv->size == 0) {
5729                 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0, 0);
5730                 /*
5731                  * If we couldn't reserve metadata bytes try and use some from
5732                  * the global reserve.
5733                  */
5734                 if (ret && block_rsv != global_rsv) {
5735                         ret = block_rsv_use_bytes(global_rsv, blocksize);
5736                         if (!ret)
5737                                 return global_rsv;
5738                         return ERR_PTR(ret);
5739                 } else if (ret) {
5740                         return ERR_PTR(ret);
5741                 }
5742                 return block_rsv;
5743         }
5744
5745         ret = block_rsv_use_bytes(block_rsv, blocksize);
5746         if (!ret)
5747                 return block_rsv;
5748         if (ret) {
5749                 WARN_ON(1);
5750                 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0, 0);
5751                 if (!ret) {
5752                         return block_rsv;
5753                 } else if (ret && block_rsv != global_rsv) {
5754                         ret = block_rsv_use_bytes(global_rsv, blocksize);
5755                         if (!ret)
5756                                 return global_rsv;
5757                 }
5758         }
5759
5760         return ERR_PTR(-ENOSPC);
5761 }
5762
5763 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5764 {
5765         block_rsv_add_bytes(block_rsv, blocksize, 0);
5766         block_rsv_release_bytes(block_rsv, NULL, 0);
5767 }
5768
5769 /*
5770  * finds a free extent and does all the dirty work required for allocation
5771  * returns the key for the extent through ins, and a tree buffer for
5772  * the first block of the extent through buf.
5773  *
5774  * returns the tree buffer or NULL.
5775  */
5776 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5777                                         struct btrfs_root *root, u32 blocksize,
5778                                         u64 parent, u64 root_objectid,
5779                                         struct btrfs_disk_key *key, int level,
5780                                         u64 hint, u64 empty_size)
5781 {
5782         struct btrfs_key ins;
5783         struct btrfs_block_rsv *block_rsv;
5784         struct extent_buffer *buf;
5785         u64 flags = 0;
5786         int ret;
5787
5788
5789         block_rsv = use_block_rsv(trans, root, blocksize);
5790         if (IS_ERR(block_rsv))
5791                 return ERR_CAST(block_rsv);
5792
5793         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5794                                    empty_size, hint, (u64)-1, &ins, 0);
5795         if (ret) {
5796                 unuse_block_rsv(block_rsv, blocksize);
5797                 return ERR_PTR(ret);
5798         }
5799
5800         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5801                                     blocksize, level);
5802         BUG_ON(IS_ERR(buf));
5803
5804         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5805                 if (parent == 0)
5806                         parent = ins.objectid;
5807                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5808         } else
5809                 BUG_ON(parent > 0);
5810
5811         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5812                 struct btrfs_delayed_extent_op *extent_op;
5813                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5814                 BUG_ON(!extent_op);
5815                 if (key)
5816                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
5817                 else
5818                         memset(&extent_op->key, 0, sizeof(extent_op->key));
5819                 extent_op->flags_to_set = flags;
5820                 extent_op->update_key = 1;
5821                 extent_op->update_flags = 1;
5822                 extent_op->is_data = 0;
5823
5824                 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5825                                         ins.offset, parent, root_objectid,
5826                                         level, BTRFS_ADD_DELAYED_EXTENT,
5827                                         extent_op);
5828                 BUG_ON(ret);
5829         }
5830         return buf;
5831 }
5832
5833 struct walk_control {
5834         u64 refs[BTRFS_MAX_LEVEL];
5835         u64 flags[BTRFS_MAX_LEVEL];
5836         struct btrfs_key update_progress;
5837         int stage;
5838         int level;
5839         int shared_level;
5840         int update_ref;
5841         int keep_locks;
5842         int reada_slot;
5843         int reada_count;
5844 };
5845
5846 #define DROP_REFERENCE  1
5847 #define UPDATE_BACKREF  2
5848
5849 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5850                                      struct btrfs_root *root,
5851                                      struct walk_control *wc,
5852                                      struct btrfs_path *path)
5853 {
5854         u64 bytenr;
5855         u64 generation;
5856         u64 refs;
5857         u64 flags;
5858         u32 nritems;
5859         u32 blocksize;
5860         struct btrfs_key key;
5861         struct extent_buffer *eb;
5862         int ret;
5863         int slot;
5864         int nread = 0;
5865
5866         if (path->slots[wc->level] < wc->reada_slot) {
5867                 wc->reada_count = wc->reada_count * 2 / 3;
5868                 wc->reada_count = max(wc->reada_count, 2);
5869         } else {
5870                 wc->reada_count = wc->reada_count * 3 / 2;
5871                 wc->reada_count = min_t(int, wc->reada_count,
5872                                         BTRFS_NODEPTRS_PER_BLOCK(root));
5873         }
5874
5875         eb = path->nodes[wc->level];
5876         nritems = btrfs_header_nritems(eb);
5877         blocksize = btrfs_level_size(root, wc->level - 1);
5878
5879         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5880                 if (nread >= wc->reada_count)
5881                         break;
5882
5883                 cond_resched();
5884                 bytenr = btrfs_node_blockptr(eb, slot);
5885                 generation = btrfs_node_ptr_generation(eb, slot);
5886
5887                 if (slot == path->slots[wc->level])
5888                         goto reada;
5889
5890                 if (wc->stage == UPDATE_BACKREF &&
5891                     generation <= root->root_key.offset)
5892                         continue;
5893
5894                 /* We don't lock the tree block, it's OK to be racy here */
5895                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5896                                                &refs, &flags);
5897                 BUG_ON(ret);
5898                 BUG_ON(refs == 0);
5899
5900                 if (wc->stage == DROP_REFERENCE) {
5901                         if (refs == 1)
5902                                 goto reada;
5903
5904                         if (wc->level == 1 &&
5905                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5906                                 continue;
5907                         if (!wc->update_ref ||
5908                             generation <= root->root_key.offset)
5909                                 continue;
5910                         btrfs_node_key_to_cpu(eb, &key, slot);
5911                         ret = btrfs_comp_cpu_keys(&key,
5912                                                   &wc->update_progress);
5913                         if (ret < 0)
5914                                 continue;
5915                 } else {
5916                         if (wc->level == 1 &&
5917                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5918                                 continue;
5919                 }
5920 reada:
5921                 ret = readahead_tree_block(root, bytenr, blocksize,
5922                                            generation);
5923                 if (ret)
5924                         break;
5925                 nread++;
5926         }
5927         wc->reada_slot = slot;
5928 }
5929
5930 /*
5931  * hepler to process tree block while walking down the tree.
5932  *
5933  * when wc->stage == UPDATE_BACKREF, this function updates
5934  * back refs for pointers in the block.
5935  *
5936  * NOTE: return value 1 means we should stop walking down.
5937  */
5938 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5939                                    struct btrfs_root *root,
5940                                    struct btrfs_path *path,
5941                                    struct walk_control *wc, int lookup_info)
5942 {
5943         int level = wc->level;
5944         struct extent_buffer *eb = path->nodes[level];
5945         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5946         int ret;
5947
5948         if (wc->stage == UPDATE_BACKREF &&
5949             btrfs_header_owner(eb) != root->root_key.objectid)
5950                 return 1;
5951
5952         /*
5953          * when reference count of tree block is 1, it won't increase
5954          * again. once full backref flag is set, we never clear it.
5955          */
5956         if (lookup_info &&
5957             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5958              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5959                 BUG_ON(!path->locks[level]);
5960                 ret = btrfs_lookup_extent_info(trans, root,
5961                                                eb->start, eb->len,
5962                                                &wc->refs[level],
5963                                                &wc->flags[level]);
5964                 BUG_ON(ret);
5965                 BUG_ON(wc->refs[level] == 0);
5966         }
5967
5968         if (wc->stage == DROP_REFERENCE) {
5969                 if (wc->refs[level] > 1)
5970                         return 1;
5971
5972                 if (path->locks[level] && !wc->keep_locks) {
5973                         btrfs_tree_unlock_rw(eb, path->locks[level]);
5974                         path->locks[level] = 0;
5975                 }
5976                 return 0;
5977         }
5978
5979         /* wc->stage == UPDATE_BACKREF */
5980         if (!(wc->flags[level] & flag)) {
5981                 BUG_ON(!path->locks[level]);
5982                 ret = btrfs_inc_ref(trans, root, eb, 1);
5983                 BUG_ON(ret);
5984                 ret = btrfs_dec_ref(trans, root, eb, 0);
5985                 BUG_ON(ret);
5986                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5987                                                   eb->len, flag, 0);
5988                 BUG_ON(ret);
5989                 wc->flags[level] |= flag;
5990         }
5991
5992         /*
5993          * the block is shared by multiple trees, so it's not good to
5994          * keep the tree lock
5995          */
5996         if (path->locks[level] && level > 0) {
5997                 btrfs_tree_unlock_rw(eb, path->locks[level]);
5998                 path->locks[level] = 0;
5999         }
6000         return 0;
6001 }
6002
6003 /*
6004  * hepler to process tree block pointer.
6005  *
6006  * when wc->stage == DROP_REFERENCE, this function checks
6007  * reference count of the block pointed to. if the block
6008  * is shared and we need update back refs for the subtree
6009  * rooted at the block, this function changes wc->stage to
6010  * UPDATE_BACKREF. if the block is shared and there is no
6011  * need to update back, this function drops the reference
6012  * to the block.
6013  *
6014  * NOTE: return value 1 means we should stop walking down.
6015  */
6016 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6017                                  struct btrfs_root *root,
6018                                  struct btrfs_path *path,
6019                                  struct walk_control *wc, int *lookup_info)
6020 {
6021         u64 bytenr;
6022         u64 generation;
6023         u64 parent;
6024         u32 blocksize;
6025         struct btrfs_key key;
6026         struct extent_buffer *next;
6027         int level = wc->level;
6028         int reada = 0;
6029         int ret = 0;
6030
6031         generation = btrfs_node_ptr_generation(path->nodes[level],
6032                                                path->slots[level]);
6033         /*
6034          * if the lower level block was created before the snapshot
6035          * was created, we know there is no need to update back refs
6036          * for the subtree
6037          */
6038         if (wc->stage == UPDATE_BACKREF &&
6039             generation <= root->root_key.offset) {
6040                 *lookup_info = 1;
6041                 return 1;
6042         }
6043
6044         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6045         blocksize = btrfs_level_size(root, level - 1);
6046
6047         next = btrfs_find_tree_block(root, bytenr, blocksize);
6048         if (!next) {
6049                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6050                 if (!next)
6051                         return -ENOMEM;
6052                 reada = 1;
6053         }
6054         btrfs_tree_lock(next);
6055         btrfs_set_lock_blocking(next);
6056
6057         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6058                                        &wc->refs[level - 1],
6059                                        &wc->flags[level - 1]);
6060         BUG_ON(ret);
6061         BUG_ON(wc->refs[level - 1] == 0);
6062         *lookup_info = 0;
6063
6064         if (wc->stage == DROP_REFERENCE) {
6065                 if (wc->refs[level - 1] > 1) {
6066                         if (level == 1 &&
6067                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6068                                 goto skip;
6069
6070                         if (!wc->update_ref ||
6071                             generation <= root->root_key.offset)
6072                                 goto skip;
6073
6074                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6075                                               path->slots[level]);
6076                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6077                         if (ret < 0)
6078                                 goto skip;
6079
6080                         wc->stage = UPDATE_BACKREF;
6081                         wc->shared_level = level - 1;
6082                 }
6083         } else {
6084                 if (level == 1 &&
6085                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6086                         goto skip;
6087         }
6088
6089         if (!btrfs_buffer_uptodate(next, generation)) {
6090                 btrfs_tree_unlock(next);
6091                 free_extent_buffer(next);
6092                 next = NULL;
6093                 *lookup_info = 1;
6094         }
6095
6096         if (!next) {
6097                 if (reada && level == 1)
6098                         reada_walk_down(trans, root, wc, path);
6099                 next = read_tree_block(root, bytenr, blocksize, generation);
6100                 if (!next)
6101                         return -EIO;
6102                 btrfs_tree_lock(next);
6103                 btrfs_set_lock_blocking(next);
6104         }
6105
6106         level--;
6107         BUG_ON(level != btrfs_header_level(next));
6108         path->nodes[level] = next;
6109         path->slots[level] = 0;
6110         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6111         wc->level = level;
6112         if (wc->level == 1)
6113                 wc->reada_slot = 0;
6114         return 0;
6115 skip:
6116         wc->refs[level - 1] = 0;
6117         wc->flags[level - 1] = 0;
6118         if (wc->stage == DROP_REFERENCE) {
6119                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6120                         parent = path->nodes[level]->start;
6121                 } else {
6122                         BUG_ON(root->root_key.objectid !=
6123                                btrfs_header_owner(path->nodes[level]));
6124                         parent = 0;
6125                 }
6126
6127                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6128                                         root->root_key.objectid, level - 1, 0);
6129                 BUG_ON(ret);
6130         }
6131         btrfs_tree_unlock(next);
6132         free_extent_buffer(next);
6133         *lookup_info = 1;
6134         return 1;
6135 }
6136
6137 /*
6138  * hepler to process tree block while walking up the tree.
6139  *
6140  * when wc->stage == DROP_REFERENCE, this function drops
6141  * reference count on the block.
6142  *
6143  * when wc->stage == UPDATE_BACKREF, this function changes
6144  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6145  * to UPDATE_BACKREF previously while processing the block.
6146  *
6147  * NOTE: return value 1 means we should stop walking up.
6148  */
6149 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6150                                  struct btrfs_root *root,
6151                                  struct btrfs_path *path,
6152                                  struct walk_control *wc)
6153 {
6154         int ret;
6155         int level = wc->level;
6156         struct extent_buffer *eb = path->nodes[level];
6157         u64 parent = 0;
6158
6159         if (wc->stage == UPDATE_BACKREF) {
6160                 BUG_ON(wc->shared_level < level);
6161                 if (level < wc->shared_level)
6162                         goto out;
6163
6164                 ret = find_next_key(path, level + 1, &wc->update_progress);
6165                 if (ret > 0)
6166                         wc->update_ref = 0;
6167
6168                 wc->stage = DROP_REFERENCE;
6169                 wc->shared_level = -1;
6170                 path->slots[level] = 0;
6171
6172                 /*
6173                  * check reference count again if the block isn't locked.
6174                  * we should start walking down the tree again if reference
6175                  * count is one.
6176                  */
6177                 if (!path->locks[level]) {
6178                         BUG_ON(level == 0);
6179                         btrfs_tree_lock(eb);
6180                         btrfs_set_lock_blocking(eb);
6181                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6182
6183                         ret = btrfs_lookup_extent_info(trans, root,
6184                                                        eb->start, eb->len,
6185                                                        &wc->refs[level],
6186                                                        &wc->flags[level]);
6187                         BUG_ON(ret);
6188                         BUG_ON(wc->refs[level] == 0);
6189                         if (wc->refs[level] == 1) {
6190                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6191                                 return 1;
6192                         }
6193                 }
6194         }
6195
6196         /* wc->stage == DROP_REFERENCE */
6197         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6198
6199         if (wc->refs[level] == 1) {
6200                 if (level == 0) {
6201                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6202                                 ret = btrfs_dec_ref(trans, root, eb, 1);
6203                         else
6204                                 ret = btrfs_dec_ref(trans, root, eb, 0);
6205                         BUG_ON(ret);
6206                 }
6207                 /* make block locked assertion in clean_tree_block happy */
6208                 if (!path->locks[level] &&
6209                     btrfs_header_generation(eb) == trans->transid) {
6210                         btrfs_tree_lock(eb);
6211                         btrfs_set_lock_blocking(eb);
6212                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6213                 }
6214                 clean_tree_block(trans, root, eb);
6215         }
6216
6217         if (eb == root->node) {
6218                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6219                         parent = eb->start;
6220                 else
6221                         BUG_ON(root->root_key.objectid !=
6222                                btrfs_header_owner(eb));
6223         } else {
6224                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6225                         parent = path->nodes[level + 1]->start;
6226                 else
6227                         BUG_ON(root->root_key.objectid !=
6228                                btrfs_header_owner(path->nodes[level + 1]));
6229         }
6230
6231         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6232 out:
6233         wc->refs[level] = 0;
6234         wc->flags[level] = 0;
6235         return 0;
6236 }
6237
6238 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6239                                    struct btrfs_root *root,
6240                                    struct btrfs_path *path,
6241                                    struct walk_control *wc)
6242 {
6243         int level = wc->level;
6244         int lookup_info = 1;
6245         int ret;
6246
6247         while (level >= 0) {
6248                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6249                 if (ret > 0)
6250                         break;
6251
6252                 if (level == 0)
6253                         break;
6254
6255                 if (path->slots[level] >=
6256                     btrfs_header_nritems(path->nodes[level]))
6257                         break;
6258
6259                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6260                 if (ret > 0) {
6261                         path->slots[level]++;
6262                         continue;
6263                 } else if (ret < 0)
6264                         return ret;
6265                 level = wc->level;
6266         }
6267         return 0;
6268 }
6269
6270 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6271                                  struct btrfs_root *root,
6272                                  struct btrfs_path *path,
6273                                  struct walk_control *wc, int max_level)
6274 {
6275         int level = wc->level;
6276         int ret;
6277
6278         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6279         while (level < max_level && path->nodes[level]) {
6280                 wc->level = level;
6281                 if (path->slots[level] + 1 <
6282                     btrfs_header_nritems(path->nodes[level])) {
6283                         path->slots[level]++;
6284                         return 0;
6285                 } else {
6286                         ret = walk_up_proc(trans, root, path, wc);
6287                         if (ret > 0)
6288                                 return 0;
6289
6290                         if (path->locks[level]) {
6291                                 btrfs_tree_unlock_rw(path->nodes[level],
6292                                                      path->locks[level]);
6293                                 path->locks[level] = 0;
6294                         }
6295                         free_extent_buffer(path->nodes[level]);
6296                         path->nodes[level] = NULL;
6297                         level++;
6298                 }
6299         }
6300         return 1;
6301 }
6302
6303 /*
6304  * drop a subvolume tree.
6305  *
6306  * this function traverses the tree freeing any blocks that only
6307  * referenced by the tree.
6308  *
6309  * when a shared tree block is found. this function decreases its
6310  * reference count by one. if update_ref is true, this function
6311  * also make sure backrefs for the shared block and all lower level
6312  * blocks are properly updated.
6313  */
6314 void btrfs_drop_snapshot(struct btrfs_root *root,
6315                          struct btrfs_block_rsv *block_rsv, int update_ref)
6316 {
6317         struct btrfs_path *path;
6318         struct btrfs_trans_handle *trans;
6319         struct btrfs_root *tree_root = root->fs_info->tree_root;
6320         struct btrfs_root_item *root_item = &root->root_item;
6321         struct walk_control *wc;
6322         struct btrfs_key key;
6323         int err = 0;
6324         int ret;
6325         int level;
6326
6327         path = btrfs_alloc_path();
6328         if (!path) {
6329                 err = -ENOMEM;
6330                 goto out;
6331         }
6332
6333         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6334         if (!wc) {
6335                 btrfs_free_path(path);
6336                 err = -ENOMEM;
6337                 goto out;
6338         }
6339
6340         trans = btrfs_start_transaction(tree_root, 0);
6341         BUG_ON(IS_ERR(trans));
6342
6343         if (block_rsv)
6344                 trans->block_rsv = block_rsv;
6345
6346         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6347                 level = btrfs_header_level(root->node);
6348                 path->nodes[level] = btrfs_lock_root_node(root);
6349                 btrfs_set_lock_blocking(path->nodes[level]);
6350                 path->slots[level] = 0;
6351                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6352                 memset(&wc->update_progress, 0,
6353                        sizeof(wc->update_progress));
6354         } else {
6355                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6356                 memcpy(&wc->update_progress, &key,
6357                        sizeof(wc->update_progress));
6358
6359                 level = root_item->drop_level;
6360                 BUG_ON(level == 0);
6361                 path->lowest_level = level;
6362                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6363                 path->lowest_level = 0;
6364                 if (ret < 0) {
6365                         err = ret;
6366                         goto out_free;
6367                 }
6368                 WARN_ON(ret > 0);
6369
6370                 /*
6371                  * unlock our path, this is safe because only this
6372                  * function is allowed to delete this snapshot
6373                  */
6374                 btrfs_unlock_up_safe(path, 0);
6375
6376                 level = btrfs_header_level(root->node);
6377                 while (1) {
6378                         btrfs_tree_lock(path->nodes[level]);
6379                         btrfs_set_lock_blocking(path->nodes[level]);
6380
6381                         ret = btrfs_lookup_extent_info(trans, root,
6382                                                 path->nodes[level]->start,
6383                                                 path->nodes[level]->len,
6384                                                 &wc->refs[level],
6385                                                 &wc->flags[level]);
6386                         BUG_ON(ret);
6387                         BUG_ON(wc->refs[level] == 0);
6388
6389                         if (level == root_item->drop_level)
6390                                 break;
6391
6392                         btrfs_tree_unlock(path->nodes[level]);
6393                         WARN_ON(wc->refs[level] != 1);
6394                         level--;
6395                 }
6396         }
6397
6398         wc->level = level;
6399         wc->shared_level = -1;
6400         wc->stage = DROP_REFERENCE;
6401         wc->update_ref = update_ref;
6402         wc->keep_locks = 0;
6403         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6404
6405         while (1) {
6406                 ret = walk_down_tree(trans, root, path, wc);
6407                 if (ret < 0) {
6408                         err = ret;
6409                         break;
6410                 }
6411
6412                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6413                 if (ret < 0) {
6414                         err = ret;
6415                         break;
6416                 }
6417
6418                 if (ret > 0) {
6419                         BUG_ON(wc->stage != DROP_REFERENCE);
6420                         break;
6421                 }
6422
6423                 if (wc->stage == DROP_REFERENCE) {
6424                         level = wc->level;
6425                         btrfs_node_key(path->nodes[level],
6426                                        &root_item->drop_progress,
6427                                        path->slots[level]);
6428                         root_item->drop_level = level;
6429                 }
6430
6431                 BUG_ON(wc->level == 0);
6432                 if (btrfs_should_end_transaction(trans, tree_root)) {
6433                         ret = btrfs_update_root(trans, tree_root,
6434                                                 &root->root_key,
6435                                                 root_item);
6436                         BUG_ON(ret);
6437
6438                         btrfs_end_transaction_throttle(trans, tree_root);
6439                         trans = btrfs_start_transaction(tree_root, 0);
6440                         BUG_ON(IS_ERR(trans));
6441                         if (block_rsv)
6442                                 trans->block_rsv = block_rsv;
6443                 }
6444         }
6445         btrfs_release_path(path);
6446         BUG_ON(err);
6447
6448         ret = btrfs_del_root(trans, tree_root, &root->root_key);
6449         BUG_ON(ret);
6450
6451         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6452                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6453                                            NULL, NULL);
6454                 BUG_ON(ret < 0);
6455                 if (ret > 0) {
6456                         /* if we fail to delete the orphan item this time
6457                          * around, it'll get picked up the next time.
6458                          *
6459                          * The most common failure here is just -ENOENT.
6460                          */
6461                         btrfs_del_orphan_item(trans, tree_root,
6462                                               root->root_key.objectid);
6463                 }
6464         }
6465
6466         if (root->in_radix) {
6467                 btrfs_free_fs_root(tree_root->fs_info, root);
6468         } else {
6469                 free_extent_buffer(root->node);
6470                 free_extent_buffer(root->commit_root);
6471                 kfree(root);
6472         }
6473 out_free:
6474         btrfs_end_transaction_throttle(trans, tree_root);
6475         kfree(wc);
6476         btrfs_free_path(path);
6477 out:
6478         if (err)
6479                 btrfs_std_error(root->fs_info, err);
6480         return;
6481 }
6482
6483 /*
6484  * drop subtree rooted at tree block 'node'.
6485  *
6486  * NOTE: this function will unlock and release tree block 'node'
6487  */
6488 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6489                         struct btrfs_root *root,
6490                         struct extent_buffer *node,
6491                         struct extent_buffer *parent)
6492 {
6493         struct btrfs_path *path;
6494         struct walk_control *wc;
6495         int level;
6496         int parent_level;
6497         int ret = 0;
6498         int wret;
6499
6500         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6501
6502         path = btrfs_alloc_path();
6503         if (!path)
6504                 return -ENOMEM;
6505
6506         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6507         if (!wc) {
6508                 btrfs_free_path(path);
6509                 return -ENOMEM;
6510         }
6511
6512         btrfs_assert_tree_locked(parent);
6513         parent_level = btrfs_header_level(parent);
6514         extent_buffer_get(parent);
6515         path->nodes[parent_level] = parent;
6516         path->slots[parent_level] = btrfs_header_nritems(parent);
6517
6518         btrfs_assert_tree_locked(node);
6519         level = btrfs_header_level(node);
6520         path->nodes[level] = node;
6521         path->slots[level] = 0;
6522         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6523
6524         wc->refs[parent_level] = 1;
6525         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6526         wc->level = level;
6527         wc->shared_level = -1;
6528         wc->stage = DROP_REFERENCE;
6529         wc->update_ref = 0;
6530         wc->keep_locks = 1;
6531         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6532
6533         while (1) {
6534                 wret = walk_down_tree(trans, root, path, wc);
6535                 if (wret < 0) {
6536                         ret = wret;
6537                         break;
6538                 }
6539
6540                 wret = walk_up_tree(trans, root, path, wc, parent_level);
6541                 if (wret < 0)
6542                         ret = wret;
6543                 if (wret != 0)
6544                         break;
6545         }
6546
6547         kfree(wc);
6548         btrfs_free_path(path);
6549         return ret;
6550 }
6551
6552 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6553 {
6554         u64 num_devices;
6555         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6556                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6557
6558         /*
6559          * we add in the count of missing devices because we want
6560          * to make sure that any RAID levels on a degraded FS
6561          * continue to be honored.
6562          */
6563         num_devices = root->fs_info->fs_devices->rw_devices +
6564                 root->fs_info->fs_devices->missing_devices;
6565
6566         if (num_devices == 1) {
6567                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6568                 stripped = flags & ~stripped;
6569
6570                 /* turn raid0 into single device chunks */
6571                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6572                         return stripped;
6573
6574                 /* turn mirroring into duplication */
6575                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6576                              BTRFS_BLOCK_GROUP_RAID10))
6577                         return stripped | BTRFS_BLOCK_GROUP_DUP;
6578                 return flags;
6579         } else {
6580                 /* they already had raid on here, just return */
6581                 if (flags & stripped)
6582                         return flags;
6583
6584                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6585                 stripped = flags & ~stripped;
6586
6587                 /* switch duplicated blocks with raid1 */
6588                 if (flags & BTRFS_BLOCK_GROUP_DUP)
6589                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
6590
6591                 /* turn single device chunks into raid0 */
6592                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6593         }
6594         return flags;
6595 }
6596
6597 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
6598 {
6599         struct btrfs_space_info *sinfo = cache->space_info;
6600         u64 num_bytes;
6601         u64 min_allocable_bytes;
6602         int ret = -ENOSPC;
6603
6604
6605         /*
6606          * We need some metadata space and system metadata space for
6607          * allocating chunks in some corner cases until we force to set
6608          * it to be readonly.
6609          */
6610         if ((sinfo->flags &
6611              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
6612             !force)
6613                 min_allocable_bytes = 1 * 1024 * 1024;
6614         else
6615                 min_allocable_bytes = 0;
6616
6617         spin_lock(&sinfo->lock);
6618         spin_lock(&cache->lock);
6619
6620         if (cache->ro) {
6621                 ret = 0;
6622                 goto out;
6623         }
6624
6625         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6626                     cache->bytes_super - btrfs_block_group_used(&cache->item);
6627
6628         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
6629             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
6630             min_allocable_bytes <= sinfo->total_bytes) {
6631                 sinfo->bytes_readonly += num_bytes;
6632                 cache->ro = 1;
6633                 ret = 0;
6634         }
6635 out:
6636         spin_unlock(&cache->lock);
6637         spin_unlock(&sinfo->lock);
6638         return ret;
6639 }
6640
6641 int btrfs_set_block_group_ro(struct btrfs_root *root,
6642                              struct btrfs_block_group_cache *cache)
6643
6644 {
6645         struct btrfs_trans_handle *trans;
6646         u64 alloc_flags;
6647         int ret;
6648
6649         BUG_ON(cache->ro);
6650
6651         trans = btrfs_join_transaction(root);
6652         BUG_ON(IS_ERR(trans));
6653
6654         alloc_flags = update_block_group_flags(root, cache->flags);
6655         if (alloc_flags != cache->flags)
6656                 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6657                                CHUNK_ALLOC_FORCE);
6658
6659         ret = set_block_group_ro(cache, 0);
6660         if (!ret)
6661                 goto out;
6662         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
6663         ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6664                              CHUNK_ALLOC_FORCE);
6665         if (ret < 0)
6666                 goto out;
6667         ret = set_block_group_ro(cache, 0);
6668 out:
6669         btrfs_end_transaction(trans, root);
6670         return ret;
6671 }
6672
6673 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
6674                             struct btrfs_root *root, u64 type)
6675 {
6676         u64 alloc_flags = get_alloc_profile(root, type);
6677         return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
6678                               CHUNK_ALLOC_FORCE);
6679 }
6680
6681 /*
6682  * helper to account the unused space of all the readonly block group in the
6683  * list. takes mirrors into account.
6684  */
6685 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
6686 {
6687         struct btrfs_block_group_cache *block_group;
6688         u64 free_bytes = 0;
6689         int factor;
6690
6691         list_for_each_entry(block_group, groups_list, list) {
6692                 spin_lock(&block_group->lock);
6693
6694                 if (!block_group->ro) {
6695                         spin_unlock(&block_group->lock);
6696                         continue;
6697                 }
6698
6699                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
6700                                           BTRFS_BLOCK_GROUP_RAID10 |
6701                                           BTRFS_BLOCK_GROUP_DUP))
6702                         factor = 2;
6703                 else
6704                         factor = 1;
6705
6706                 free_bytes += (block_group->key.offset -
6707                                btrfs_block_group_used(&block_group->item)) *
6708                                factor;
6709
6710                 spin_unlock(&block_group->lock);
6711         }
6712
6713         return free_bytes;
6714 }
6715
6716 /*
6717  * helper to account the unused space of all the readonly block group in the
6718  * space_info. takes mirrors into account.
6719  */
6720 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
6721 {
6722         int i;
6723         u64 free_bytes = 0;
6724
6725         spin_lock(&sinfo->lock);
6726
6727         for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
6728                 if (!list_empty(&sinfo->block_groups[i]))
6729                         free_bytes += __btrfs_get_ro_block_group_free_space(
6730                                                 &sinfo->block_groups[i]);
6731
6732         spin_unlock(&sinfo->lock);
6733
6734         return free_bytes;
6735 }
6736
6737 int btrfs_set_block_group_rw(struct btrfs_root *root,
6738                               struct btrfs_block_group_cache *cache)
6739 {
6740         struct btrfs_space_info *sinfo = cache->space_info;
6741         u64 num_bytes;
6742
6743         BUG_ON(!cache->ro);
6744
6745         spin_lock(&sinfo->lock);
6746         spin_lock(&cache->lock);
6747         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
6748                     cache->bytes_super - btrfs_block_group_used(&cache->item);
6749         sinfo->bytes_readonly -= num_bytes;
6750         cache->ro = 0;
6751         spin_unlock(&cache->lock);
6752         spin_unlock(&sinfo->lock);
6753         return 0;
6754 }
6755
6756 /*
6757  * checks to see if its even possible to relocate this block group.
6758  *
6759  * @return - -1 if it's not a good idea to relocate this block group, 0 if its
6760  * ok to go ahead and try.
6761  */
6762 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6763 {
6764         struct btrfs_block_group_cache *block_group;
6765         struct btrfs_space_info *space_info;
6766         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6767         struct btrfs_device *device;
6768         u64 min_free;
6769         u64 dev_min = 1;
6770         u64 dev_nr = 0;
6771         int index;
6772         int full = 0;
6773         int ret = 0;
6774
6775         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
6776
6777         /* odd, couldn't find the block group, leave it alone */
6778         if (!block_group)
6779                 return -1;
6780
6781         min_free = btrfs_block_group_used(&block_group->item);
6782
6783         /* no bytes used, we're good */
6784         if (!min_free)
6785                 goto out;
6786
6787         space_info = block_group->space_info;
6788         spin_lock(&space_info->lock);
6789
6790         full = space_info->full;
6791
6792         /*
6793          * if this is the last block group we have in this space, we can't
6794          * relocate it unless we're able to allocate a new chunk below.
6795          *
6796          * Otherwise, we need to make sure we have room in the space to handle
6797          * all of the extents from this block group.  If we can, we're good
6798          */
6799         if ((space_info->total_bytes != block_group->key.offset) &&
6800             (space_info->bytes_used + space_info->bytes_reserved +
6801              space_info->bytes_pinned + space_info->bytes_readonly +
6802              min_free < space_info->total_bytes)) {
6803                 spin_unlock(&space_info->lock);
6804                 goto out;
6805         }
6806         spin_unlock(&space_info->lock);
6807
6808         /*
6809          * ok we don't have enough space, but maybe we have free space on our
6810          * devices to allocate new chunks for relocation, so loop through our
6811          * alloc devices and guess if we have enough space.  However, if we
6812          * were marked as full, then we know there aren't enough chunks, and we
6813          * can just return.
6814          */
6815         ret = -1;
6816         if (full)
6817                 goto out;
6818
6819         /*
6820          * index:
6821          *      0: raid10
6822          *      1: raid1
6823          *      2: dup
6824          *      3: raid0
6825          *      4: single
6826          */
6827         index = get_block_group_index(block_group);
6828         if (index == 0) {
6829                 dev_min = 4;
6830                 /* Divide by 2 */
6831                 min_free >>= 1;
6832         } else if (index == 1) {
6833                 dev_min = 2;
6834         } else if (index == 2) {
6835                 /* Multiply by 2 */
6836                 min_free <<= 1;
6837         } else if (index == 3) {
6838                 dev_min = fs_devices->rw_devices;
6839                 do_div(min_free, dev_min);
6840         }
6841
6842         mutex_lock(&root->fs_info->chunk_mutex);
6843         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
6844                 u64 dev_offset;
6845
6846                 /*
6847                  * check to make sure we can actually find a chunk with enough
6848                  * space to fit our block group in.
6849                  */
6850                 if (device->total_bytes > device->bytes_used + min_free) {
6851                         ret = find_free_dev_extent(NULL, device, min_free,
6852                                                    &dev_offset, NULL);
6853                         if (!ret)
6854                                 dev_nr++;
6855
6856                         if (dev_nr >= dev_min)
6857                                 break;
6858
6859                         ret = -1;
6860                 }
6861         }
6862         mutex_unlock(&root->fs_info->chunk_mutex);
6863 out:
6864         btrfs_put_block_group(block_group);
6865         return ret;
6866 }
6867
6868 static int find_first_block_group(struct btrfs_root *root,
6869                 struct btrfs_path *path, struct btrfs_key *key)
6870 {
6871         int ret = 0;
6872         struct btrfs_key found_key;
6873         struct extent_buffer *leaf;
6874         int slot;
6875
6876         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6877         if (ret < 0)
6878                 goto out;
6879
6880         while (1) {
6881                 slot = path->slots[0];
6882                 leaf = path->nodes[0];
6883                 if (slot >= btrfs_header_nritems(leaf)) {
6884                         ret = btrfs_next_leaf(root, path);
6885                         if (ret == 0)
6886                                 continue;
6887                         if (ret < 0)
6888                                 goto out;
6889                         break;
6890                 }
6891                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6892
6893                 if (found_key.objectid >= key->objectid &&
6894                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
6895                         ret = 0;
6896                         goto out;
6897                 }
6898                 path->slots[0]++;
6899         }
6900 out:
6901         return ret;
6902 }
6903
6904 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
6905 {
6906         struct btrfs_block_group_cache *block_group;
6907         u64 last = 0;
6908
6909         while (1) {
6910                 struct inode *inode;
6911
6912                 block_group = btrfs_lookup_first_block_group(info, last);
6913                 while (block_group) {
6914                         spin_lock(&block_group->lock);
6915                         if (block_group->iref)
6916                                 break;
6917                         spin_unlock(&block_group->lock);
6918                         block_group = next_block_group(info->tree_root,
6919                                                        block_group);
6920                 }
6921                 if (!block_group) {
6922                         if (last == 0)
6923                                 break;
6924                         last = 0;
6925                         continue;
6926                 }
6927
6928                 inode = block_group->inode;
6929                 block_group->iref = 0;
6930                 block_group->inode = NULL;
6931                 spin_unlock(&block_group->lock);
6932                 iput(inode);
6933                 last = block_group->key.objectid + block_group->key.offset;
6934                 btrfs_put_block_group(block_group);
6935         }
6936 }
6937
6938 int btrfs_free_block_groups(struct btrfs_fs_info *info)
6939 {
6940         struct btrfs_block_group_cache *block_group;
6941         struct btrfs_space_info *space_info;
6942         struct btrfs_caching_control *caching_ctl;
6943         struct rb_node *n;
6944
6945         down_write(&info->extent_commit_sem);
6946         while (!list_empty(&info->caching_block_groups)) {
6947                 caching_ctl = list_entry(info->caching_block_groups.next,
6948                                          struct btrfs_caching_control, list);
6949                 list_del(&caching_ctl->list);
6950                 put_caching_control(caching_ctl);
6951         }
6952         up_write(&info->extent_commit_sem);
6953
6954         spin_lock(&info->block_group_cache_lock);
6955         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
6956                 block_group = rb_entry(n, struct btrfs_block_group_cache,
6957                                        cache_node);
6958                 rb_erase(&block_group->cache_node,
6959                          &info->block_group_cache_tree);
6960                 spin_unlock(&info->block_group_cache_lock);
6961
6962                 down_write(&block_group->space_info->groups_sem);
6963                 list_del(&block_group->list);
6964                 up_write(&block_group->space_info->groups_sem);
6965
6966                 if (block_group->cached == BTRFS_CACHE_STARTED)
6967                         wait_block_group_cache_done(block_group);
6968
6969                 /*
6970                  * We haven't cached this block group, which means we could
6971                  * possibly have excluded extents on this block group.
6972                  */
6973                 if (block_group->cached == BTRFS_CACHE_NO)
6974                         free_excluded_extents(info->extent_root, block_group);
6975
6976                 btrfs_remove_free_space_cache(block_group);
6977                 btrfs_put_block_group(block_group);
6978
6979                 spin_lock(&info->block_group_cache_lock);
6980         }
6981         spin_unlock(&info->block_group_cache_lock);
6982
6983         /* now that all the block groups are freed, go through and
6984          * free all the space_info structs.  This is only called during
6985          * the final stages of unmount, and so we know nobody is
6986          * using them.  We call synchronize_rcu() once before we start,
6987          * just to be on the safe side.
6988          */
6989         synchronize_rcu();
6990
6991         release_global_block_rsv(info);
6992
6993         while(!list_empty(&info->space_info)) {
6994                 space_info = list_entry(info->space_info.next,
6995                                         struct btrfs_space_info,
6996                                         list);
6997                 if (space_info->bytes_pinned > 0 ||
6998                     space_info->bytes_reserved > 0 ||
6999                     space_info->bytes_may_use > 0) {
7000                         WARN_ON(1);
7001                         dump_space_info(space_info, 0, 0);
7002                 }
7003                 list_del(&space_info->list);
7004                 kfree(space_info);
7005         }
7006         return 0;
7007 }
7008
7009 static void __link_block_group(struct btrfs_space_info *space_info,
7010                                struct btrfs_block_group_cache *cache)
7011 {
7012         int index = get_block_group_index(cache);
7013
7014         down_write(&space_info->groups_sem);
7015         list_add_tail(&cache->list, &space_info->block_groups[index]);
7016         up_write(&space_info->groups_sem);
7017 }
7018
7019 int btrfs_read_block_groups(struct btrfs_root *root)
7020 {
7021         struct btrfs_path *path;
7022         int ret;
7023         struct btrfs_block_group_cache *cache;
7024         struct btrfs_fs_info *info = root->fs_info;
7025         struct btrfs_space_info *space_info;
7026         struct btrfs_key key;
7027         struct btrfs_key found_key;
7028         struct extent_buffer *leaf;
7029         int need_clear = 0;
7030         u64 cache_gen;
7031
7032         root = info->extent_root;
7033         key.objectid = 0;
7034         key.offset = 0;
7035         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7036         path = btrfs_alloc_path();
7037         if (!path)
7038                 return -ENOMEM;
7039         path->reada = 1;
7040
7041         cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
7042         if (btrfs_test_opt(root, SPACE_CACHE) &&
7043             btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
7044                 need_clear = 1;
7045         if (btrfs_test_opt(root, CLEAR_CACHE))
7046                 need_clear = 1;
7047
7048         while (1) {
7049                 ret = find_first_block_group(root, path, &key);
7050                 if (ret > 0)
7051                         break;
7052                 if (ret != 0)
7053                         goto error;
7054                 leaf = path->nodes[0];
7055                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7056                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7057                 if (!cache) {
7058                         ret = -ENOMEM;
7059                         goto error;
7060                 }
7061                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7062                                                 GFP_NOFS);
7063                 if (!cache->free_space_ctl) {
7064                         kfree(cache);
7065                         ret = -ENOMEM;
7066                         goto error;
7067                 }
7068
7069                 atomic_set(&cache->count, 1);
7070                 spin_lock_init(&cache->lock);
7071                 cache->fs_info = info;
7072                 INIT_LIST_HEAD(&cache->list);
7073                 INIT_LIST_HEAD(&cache->cluster_list);
7074
7075                 if (need_clear)
7076                         cache->disk_cache_state = BTRFS_DC_CLEAR;
7077
7078                 read_extent_buffer(leaf, &cache->item,
7079                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7080                                    sizeof(cache->item));
7081                 memcpy(&cache->key, &found_key, sizeof(found_key));
7082
7083                 key.objectid = found_key.objectid + found_key.offset;
7084                 btrfs_release_path(path);
7085                 cache->flags = btrfs_block_group_flags(&cache->item);
7086                 cache->sectorsize = root->sectorsize;
7087
7088                 btrfs_init_free_space_ctl(cache);
7089
7090                 /*
7091                  * We need to exclude the super stripes now so that the space
7092                  * info has super bytes accounted for, otherwise we'll think
7093                  * we have more space than we actually do.
7094                  */
7095                 exclude_super_stripes(root, cache);
7096
7097                 /*
7098                  * check for two cases, either we are full, and therefore
7099                  * don't need to bother with the caching work since we won't
7100                  * find any space, or we are empty, and we can just add all
7101                  * the space in and be done with it.  This saves us _alot_ of
7102                  * time, particularly in the full case.
7103                  */
7104                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7105                         cache->last_byte_to_unpin = (u64)-1;
7106                         cache->cached = BTRFS_CACHE_FINISHED;
7107                         free_excluded_extents(root, cache);
7108                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7109                         cache->last_byte_to_unpin = (u64)-1;
7110                         cache->cached = BTRFS_CACHE_FINISHED;
7111                         add_new_free_space(cache, root->fs_info,
7112                                            found_key.objectid,
7113                                            found_key.objectid +
7114                                            found_key.offset);
7115                         free_excluded_extents(root, cache);
7116                 }
7117
7118                 ret = update_space_info(info, cache->flags, found_key.offset,
7119                                         btrfs_block_group_used(&cache->item),
7120                                         &space_info);
7121                 BUG_ON(ret);
7122                 cache->space_info = space_info;
7123                 spin_lock(&cache->space_info->lock);
7124                 cache->space_info->bytes_readonly += cache->bytes_super;
7125                 spin_unlock(&cache->space_info->lock);
7126
7127                 __link_block_group(space_info, cache);
7128
7129                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7130                 BUG_ON(ret);
7131
7132                 set_avail_alloc_bits(root->fs_info, cache->flags);
7133                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7134                         set_block_group_ro(cache, 1);
7135         }
7136
7137         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7138                 if (!(get_alloc_profile(root, space_info->flags) &
7139                       (BTRFS_BLOCK_GROUP_RAID10 |
7140                        BTRFS_BLOCK_GROUP_RAID1 |
7141                        BTRFS_BLOCK_GROUP_DUP)))
7142                         continue;
7143                 /*
7144                  * avoid allocating from un-mirrored block group if there are
7145                  * mirrored block groups.
7146                  */
7147                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7148                         set_block_group_ro(cache, 1);
7149                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7150                         set_block_group_ro(cache, 1);
7151         }
7152
7153         init_global_block_rsv(info);
7154         ret = 0;
7155 error:
7156         btrfs_free_path(path);
7157         return ret;
7158 }
7159
7160 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7161                            struct btrfs_root *root, u64 bytes_used,
7162                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7163                            u64 size)
7164 {
7165         int ret;
7166         struct btrfs_root *extent_root;
7167         struct btrfs_block_group_cache *cache;
7168
7169         extent_root = root->fs_info->extent_root;
7170
7171         root->fs_info->last_trans_log_full_commit = trans->transid;
7172
7173         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7174         if (!cache)
7175                 return -ENOMEM;
7176         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7177                                         GFP_NOFS);
7178         if (!cache->free_space_ctl) {
7179                 kfree(cache);
7180                 return -ENOMEM;
7181         }
7182
7183         cache->key.objectid = chunk_offset;
7184         cache->key.offset = size;
7185         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7186         cache->sectorsize = root->sectorsize;
7187         cache->fs_info = root->fs_info;
7188
7189         atomic_set(&cache->count, 1);
7190         spin_lock_init(&cache->lock);
7191         INIT_LIST_HEAD(&cache->list);
7192         INIT_LIST_HEAD(&cache->cluster_list);
7193
7194         btrfs_init_free_space_ctl(cache);
7195
7196         btrfs_set_block_group_used(&cache->item, bytes_used);
7197         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7198         cache->flags = type;
7199         btrfs_set_block_group_flags(&cache->item, type);
7200
7201         cache->last_byte_to_unpin = (u64)-1;
7202         cache->cached = BTRFS_CACHE_FINISHED;
7203         exclude_super_stripes(root, cache);
7204
7205         add_new_free_space(cache, root->fs_info, chunk_offset,
7206                            chunk_offset + size);
7207
7208         free_excluded_extents(root, cache);
7209
7210         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7211                                 &cache->space_info);
7212         BUG_ON(ret);
7213
7214         spin_lock(&cache->space_info->lock);
7215         cache->space_info->bytes_readonly += cache->bytes_super;
7216         spin_unlock(&cache->space_info->lock);
7217
7218         __link_block_group(cache->space_info, cache);
7219
7220         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7221         BUG_ON(ret);
7222
7223         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7224                                 sizeof(cache->item));
7225         BUG_ON(ret);
7226
7227         set_avail_alloc_bits(extent_root->fs_info, type);
7228
7229         return 0;
7230 }
7231
7232 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7233                              struct btrfs_root *root, u64 group_start)
7234 {
7235         struct btrfs_path *path;
7236         struct btrfs_block_group_cache *block_group;
7237         struct btrfs_free_cluster *cluster;
7238         struct btrfs_root *tree_root = root->fs_info->tree_root;
7239         struct btrfs_key key;
7240         struct inode *inode;
7241         int ret;
7242         int factor;
7243
7244         root = root->fs_info->extent_root;
7245
7246         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7247         BUG_ON(!block_group);
7248         BUG_ON(!block_group->ro);
7249
7250         /*
7251          * Free the reserved super bytes from this block group before
7252          * remove it.
7253          */
7254         free_excluded_extents(root, block_group);
7255
7256         memcpy(&key, &block_group->key, sizeof(key));
7257         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7258                                   BTRFS_BLOCK_GROUP_RAID1 |
7259                                   BTRFS_BLOCK_GROUP_RAID10))
7260                 factor = 2;
7261         else
7262                 factor = 1;
7263
7264         /* make sure this block group isn't part of an allocation cluster */
7265         cluster = &root->fs_info->data_alloc_cluster;
7266         spin_lock(&cluster->refill_lock);
7267         btrfs_return_cluster_to_free_space(block_group, cluster);
7268         spin_unlock(&cluster->refill_lock);
7269
7270         /*
7271          * make sure this block group isn't part of a metadata
7272          * allocation cluster
7273          */
7274         cluster = &root->fs_info->meta_alloc_cluster;
7275         spin_lock(&cluster->refill_lock);
7276         btrfs_return_cluster_to_free_space(block_group, cluster);
7277         spin_unlock(&cluster->refill_lock);
7278
7279         path = btrfs_alloc_path();
7280         if (!path) {
7281                 ret = -ENOMEM;
7282                 goto out;
7283         }
7284
7285         inode = lookup_free_space_inode(root, block_group, path);
7286         if (!IS_ERR(inode)) {
7287                 ret = btrfs_orphan_add(trans, inode);
7288                 BUG_ON(ret);
7289                 clear_nlink(inode);
7290                 /* One for the block groups ref */
7291                 spin_lock(&block_group->lock);
7292                 if (block_group->iref) {
7293                         block_group->iref = 0;
7294                         block_group->inode = NULL;
7295                         spin_unlock(&block_group->lock);
7296                         iput(inode);
7297                 } else {
7298                         spin_unlock(&block_group->lock);
7299                 }
7300                 /* One for our lookup ref */
7301                 btrfs_add_delayed_iput(inode);
7302         }
7303
7304         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7305         key.offset = block_group->key.objectid;
7306         key.type = 0;
7307
7308         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7309         if (ret < 0)
7310                 goto out;
7311         if (ret > 0)
7312                 btrfs_release_path(path);
7313         if (ret == 0) {
7314                 ret = btrfs_del_item(trans, tree_root, path);
7315                 if (ret)
7316                         goto out;
7317                 btrfs_release_path(path);
7318         }
7319
7320         spin_lock(&root->fs_info->block_group_cache_lock);
7321         rb_erase(&block_group->cache_node,
7322                  &root->fs_info->block_group_cache_tree);
7323         spin_unlock(&root->fs_info->block_group_cache_lock);
7324
7325         down_write(&block_group->space_info->groups_sem);
7326         /*
7327          * we must use list_del_init so people can check to see if they
7328          * are still on the list after taking the semaphore
7329          */
7330         list_del_init(&block_group->list);
7331         up_write(&block_group->space_info->groups_sem);
7332
7333         if (block_group->cached == BTRFS_CACHE_STARTED)
7334                 wait_block_group_cache_done(block_group);
7335
7336         btrfs_remove_free_space_cache(block_group);
7337
7338         spin_lock(&block_group->space_info->lock);
7339         block_group->space_info->total_bytes -= block_group->key.offset;
7340         block_group->space_info->bytes_readonly -= block_group->key.offset;
7341         block_group->space_info->disk_total -= block_group->key.offset * factor;
7342         spin_unlock(&block_group->space_info->lock);
7343
7344         memcpy(&key, &block_group->key, sizeof(key));
7345
7346         btrfs_clear_space_info_full(root->fs_info);
7347
7348         btrfs_put_block_group(block_group);
7349         btrfs_put_block_group(block_group);
7350
7351         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7352         if (ret > 0)
7353                 ret = -EIO;
7354         if (ret < 0)
7355                 goto out;
7356
7357         ret = btrfs_del_item(trans, root, path);
7358 out:
7359         btrfs_free_path(path);
7360         return ret;
7361 }
7362
7363 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7364 {
7365         struct btrfs_space_info *space_info;
7366         struct btrfs_super_block *disk_super;
7367         u64 features;
7368         u64 flags;
7369         int mixed = 0;
7370         int ret;
7371
7372         disk_super = &fs_info->super_copy;
7373         if (!btrfs_super_root(disk_super))
7374                 return 1;
7375
7376         features = btrfs_super_incompat_flags(disk_super);
7377         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7378                 mixed = 1;
7379
7380         flags = BTRFS_BLOCK_GROUP_SYSTEM;
7381         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7382         if (ret)
7383                 goto out;
7384
7385         if (mixed) {
7386                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7387                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7388         } else {
7389                 flags = BTRFS_BLOCK_GROUP_METADATA;
7390                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7391                 if (ret)
7392                         goto out;
7393
7394                 flags = BTRFS_BLOCK_GROUP_DATA;
7395                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7396         }
7397 out:
7398         return ret;
7399 }
7400
7401 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7402 {
7403         return unpin_extent_range(root, start, end);
7404 }
7405
7406 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
7407                                u64 num_bytes, u64 *actual_bytes)
7408 {
7409         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
7410 }
7411
7412 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7413 {
7414         struct btrfs_fs_info *fs_info = root->fs_info;
7415         struct btrfs_block_group_cache *cache = NULL;
7416         u64 group_trimmed;
7417         u64 start;
7418         u64 end;
7419         u64 trimmed = 0;
7420         int ret = 0;
7421
7422         cache = btrfs_lookup_block_group(fs_info, range->start);
7423
7424         while (cache) {
7425                 if (cache->key.objectid >= (range->start + range->len)) {
7426                         btrfs_put_block_group(cache);
7427                         break;
7428                 }
7429
7430                 start = max(range->start, cache->key.objectid);
7431                 end = min(range->start + range->len,
7432                                 cache->key.objectid + cache->key.offset);
7433
7434                 if (end - start >= range->minlen) {
7435                         if (!block_group_cache_done(cache)) {
7436                                 ret = cache_block_group(cache, NULL, root, 0);
7437                                 if (!ret)
7438                                         wait_block_group_cache_done(cache);
7439                         }
7440                         ret = btrfs_trim_block_group(cache,
7441                                                      &group_trimmed,
7442                                                      start,
7443                                                      end,
7444                                                      range->minlen);
7445
7446                         trimmed += group_trimmed;
7447                         if (ret) {
7448                                 btrfs_put_block_group(cache);
7449                                 break;
7450                         }
7451                 }
7452
7453                 cache = next_block_group(fs_info->tree_root, cache);
7454         }
7455
7456         range->len = trimmed;
7457         return ret;
7458 }