/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
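
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * the force level is simply the last argument to do_chunk_alloc(), so a
 * clustering setup path might ask for a chunk only when few exist:
 *
 *	do_chunk_alloc(trans, root->fs_info->extent_root, num_bytes,
 *		       btrfs_get_alloc_profile(root, 1),
 *		       CHUNK_ALLOC_LIMITED);
 *
 * while an ENOSPC-critical path would pass CHUNK_ALLOC_FORCE instead.
 */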

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
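
/*
 * Illustrative sketch (hypothetical caller, not from this file): an
 * allocation path that already charged bytes_may_use would reserve with
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *
 * and, if the allocation is later abandoned, return the space with
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */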

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);

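/*
 * Return non-zero once the free space for this block group has been
 * fully cached (->cached == BTRFS_CACHE_FINISHED); the smp_mb() makes
 * sure we see the most recent state set by the caching thread.
 */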
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

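/*
 * Return non-zero if the block group's type flags include all of the
 * requested bits (e.g. BTRFS_BLOCK_GROUP_METADATA).
 */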
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

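/*
 * Block groups are reference counted; every btrfs_get_block_group() must
 * be paired with a btrfs_put_block_group(), and the final put frees the
 * structure along with its free space ctl.
 */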
static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group
 * cache.
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0,
 * else it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

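/*
 * Excluded extents are ranges (the super block mirrors, and the area
 * below BTRFS_SUPER_INFO_OFFSET) that must never be handed out by the
 * allocator.  They are tracked by setting EXTENT_UPTODATE in both
 * freed_extents trees, so the caching code skips them when building
 * free space.
 */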
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret); /* -ENOMEM */
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret); /* -ENOMEM */

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret); /* -ENOMEM */
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that
 * can't be used yet, since their free space will be released only when
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

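/*
 * Worker that walks the extent tree (via the commit root) for one block
 * group and publishes everything that is not an allocated extent as free
 * space.  It periodically drops its locks and wakes waiters so allocation
 * can make progress against a partially cached group.
 */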
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it could happen in the case
	 * where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do
	 * an allocation while the first thread is still loading the space
	 * cache info.  The previous loop should have kept us from choosing
	 * this block group, but if we've moved to the state where we will
	 * wait on caching block groups, we need to first check if we're
	 * doing a fast load here, so we can wait for it to finish;
	 * otherwise we could end up allocating from a block group whose
	 * cache gets evicted for one reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	/*
	 * We can't do the read from the on-disk cache during a commit since
	 * we need the normal tree locking.  Also, if we are currently trying
	 * to allocate blocks for the tree root we can't do the fast caching
	 * since we likely hold important locks.
	 */
	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching; set cached to the
		 * appropriate value and wake up any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr.
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * Return the block group that contains the given bytenr.
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

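/*
 * Scale num by factor tenths: div_factor(num, 9) is 90% of num, e.g.
 * div_factor(100, 9) == 90.  div_factor_fine() below does the same with
 * percent granularity.
 */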
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to look up the reference count and flags of an extent.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be if all of the
 * delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended; block until it's released and
			 * try again.
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update back refs entries for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

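/*
 * Example of the key composition described above: a data extent at
 * bytenr B of length L, referenced by inode I at file offset O inside
 * the subvolume tree R, is keyed as
 *
 *     (B, BTRFS_EXTENT_ITEM_KEY, L)
 *
 * and its implicit back ref uses
 *
 *     (B, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(R, I, O))
 */
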
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

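/*
 * Hash (root_objectid, owner, offset) into the 64-bit key offset used by
 * implicit data back refs.  root_objectid is folded into the high half
 * and owner plus offset into the low half using crc32c, and the two
 * halves are then combined.
 */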
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

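/*
 * Find the data ref item for (bytenr, root_objectid, owner, offset), or
 * the shared data ref keyed by parent.  Because different tuples can hash
 * to the same key offset, we walk forward from the hashed position and
 * compare the full tuple; if the tree was COWed under us while walking
 * (recow), the search is restarted.
 */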
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

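/*
 * Insert (or bump the count of) a data back ref.  On a hash collision
 * (-EEXIST for a non-matching tuple) the key offset is incremented and
 * the insert is retried, so colliding refs end up in adjacent items.
 */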
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

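/*
 * Drop refs_to_drop references from the data ref item the path points
 * at, deleting the item entirely once its count reaches zero.
 */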
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

1393 static inline int extent_ref_type(u64 parent, u64 owner)
1394 {
1395         int type;
1396         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1397                 if (parent > 0)
1398                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1399                 else
1400                         type = BTRFS_TREE_BLOCK_REF_KEY;
1401         } else {
1402                 if (parent > 0)
1403                         type = BTRFS_SHARED_DATA_REF_KEY;
1404                 else
1405                         type = BTRFS_EXTENT_DATA_REF_KEY;
1406         }
1407         return type;
1408 }
1409
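/*
 * Illustrative examples (values hypothetical): tree blocks use their level
 * (always < BTRFS_FIRST_FREE_OBJECTID) as the owner, while data extents
 * use the inode number, so:
 *
 *	extent_ref_type(0, 1)               == BTRFS_TREE_BLOCK_REF_KEY
 *	extent_ref_type(parent_bytenr, 1)   == BTRFS_SHARED_BLOCK_REF_KEY
 *	extent_ref_type(0, 257)             == BTRFS_EXTENT_DATA_REF_KEY
 *	extent_ref_type(parent_bytenr, 257) == BTRFS_SHARED_DATA_REF_KEY
 *
 * i.e. the owner picks block vs. data refs and a nonzero parent picks the
 * shared variants.
 */

/*
 * return the key just after the current slot, walking up the path as
 * needed; returns 1 if the path already points at the very last key.
 */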
1410 static int find_next_key(struct btrfs_path *path, int level,
1411                          struct btrfs_key *key)
1413 {
1414         for (; level < BTRFS_MAX_LEVEL; level++) {
1415                 if (!path->nodes[level])
1416                         break;
1417                 if (path->slots[level] + 1 >=
1418                     btrfs_header_nritems(path->nodes[level]))
1419                         continue;
1420                 if (level == 0)
1421                         btrfs_item_key_to_cpu(path->nodes[level], key,
1422                                               path->slots[level] + 1);
1423                 else
1424                         btrfs_node_key_to_cpu(path->nodes[level], key,
1425                                               path->slots[level] + 1);
1426                 return 0;
1427         }
1428         return 1;
1429 }
1430
1431 /*
1432  * look for an inline back ref. if the back ref is found, *ref_ret is
1433  * set to the address of the inline back ref, and 0 is returned.
1434  *
1435  * if the back ref isn't found, *ref_ret is set to the address where it
1436  * should be inserted, and -ENOENT is returned.
1437  *
1438  * if insert is true and there are too many inline back refs, the path
1439  * points to the extent item, and -EAGAIN is returned.
1440  *
1441  * NOTE: inline back refs are ordered in the same way that back ref
1442  *       items in the tree are ordered.
1443  */
1444 static noinline_for_stack
1445 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1446                                  struct btrfs_root *root,
1447                                  struct btrfs_path *path,
1448                                  struct btrfs_extent_inline_ref **ref_ret,
1449                                  u64 bytenr, u64 num_bytes,
1450                                  u64 parent, u64 root_objectid,
1451                                  u64 owner, u64 offset, int insert)
1452 {
1453         struct btrfs_key key;
1454         struct extent_buffer *leaf;
1455         struct btrfs_extent_item *ei;
1456         struct btrfs_extent_inline_ref *iref;
1457         u64 flags;
1458         u64 item_size;
1459         unsigned long ptr;
1460         unsigned long end;
1461         int extra_size;
1462         int type;
1463         int want;
1464         int ret;
1465         int err = 0;
1466
1467         key.objectid = bytenr;
1468         key.type = BTRFS_EXTENT_ITEM_KEY;
1469         key.offset = num_bytes;
1470
1471         want = extent_ref_type(parent, owner);
1472         if (insert) {
1473                 extra_size = btrfs_extent_inline_ref_size(want);
1474                 path->keep_locks = 1;
1475         } else
1476                 extra_size = -1;
1477         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1478         if (ret < 0) {
1479                 err = ret;
1480                 goto out;
1481         }
1482         if (ret && !insert) {
1483                 err = -ENOENT;
1484                 goto out;
1485         }
1486         BUG_ON(ret); /* Corruption */
1487
1488         leaf = path->nodes[0];
1489         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1490 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1491         if (item_size < sizeof(*ei)) {
1492                 if (!insert) {
1493                         err = -ENOENT;
1494                         goto out;
1495                 }
1496                 ret = convert_extent_item_v0(trans, root, path, owner,
1497                                              extra_size);
1498                 if (ret < 0) {
1499                         err = ret;
1500                         goto out;
1501                 }
1502                 leaf = path->nodes[0];
1503                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1504         }
1505 #endif
1506         BUG_ON(item_size < sizeof(*ei));
1507
1508         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1509         flags = btrfs_extent_flags(leaf, ei);
1510
1511         ptr = (unsigned long)(ei + 1);
1512         end = (unsigned long)ei + item_size;
1513
1514         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1515                 ptr += sizeof(struct btrfs_tree_block_info);
1516                 BUG_ON(ptr > end);
1517         } else {
1518                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1519         }
1520
1521         err = -ENOENT;
1522         while (1) {
1523                 if (ptr >= end) {
1524                         WARN_ON(ptr > end);
1525                         break;
1526                 }
1527                 iref = (struct btrfs_extent_inline_ref *)ptr;
1528                 type = btrfs_extent_inline_ref_type(leaf, iref);
1529                 if (want < type)
1530                         break;
1531                 if (want > type) {
1532                         ptr += btrfs_extent_inline_ref_size(type);
1533                         continue;
1534                 }
1535
1536                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1537                         struct btrfs_extent_data_ref *dref;
1538                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1539                         if (match_extent_data_ref(leaf, dref, root_objectid,
1540                                                   owner, offset)) {
1541                                 err = 0;
1542                                 break;
1543                         }
1544                         if (hash_extent_data_ref_item(leaf, dref) <
1545                             hash_extent_data_ref(root_objectid, owner, offset))
1546                                 break;
1547                 } else {
1548                         u64 ref_offset;
1549                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1550                         if (parent > 0) {
1551                                 if (parent == ref_offset) {
1552                                         err = 0;
1553                                         break;
1554                                 }
1555                                 if (ref_offset < parent)
1556                                         break;
1557                         } else {
1558                                 if (root_objectid == ref_offset) {
1559                                         err = 0;
1560                                         break;
1561                                 }
1562                                 if (ref_offset < root_objectid)
1563                                         break;
1564                         }
1565                 }
1566                 ptr += btrfs_extent_inline_ref_size(type);
1567         }
1568         if (err == -ENOENT && insert) {
1569                 if (item_size + extra_size >=
1570                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1571                         err = -EAGAIN;
1572                         goto out;
1573                 }
1574                 /*
1575                  * To add a new inline back ref, we have to make sure
1576                  * there is no corresponding back ref item.
1577                  * For simplicity, we just do not add a new inline back
1578                  * ref if there is any kind of item for this block.
1579                  */
1580                 if (find_next_key(path, 0, &key) == 0 &&
1581                     key.objectid == bytenr &&
1582                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1583                         err = -EAGAIN;
1584                         goto out;
1585                 }
1586         }
1587         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1588 out:
1589         if (insert) {
1590                 path->keep_locks = 0;
1591                 btrfs_unlock_up_safe(path, 1);
1592         }
1593         return err;
1594 }
1595
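/*
 * Illustrative caller sketch for the lookup above (the real callers are
 * lookup_extent_backref() and insert_inline_extent_backref() below):
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, bytenr,
 *					   num_bytes, parent, root_objectid,
 *					   owner, offset, 1);
 *	if (ret == 0)
 *		... modify the inline ref in place ...
 *	else if (ret == -ENOENT)
 *		... iref points at the insert position ...
 *	else if (ret == -EAGAIN)
 *		... no room inline: fall back to a keyed backref item ...
 */
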
1596 /*
1597  * helper to add a new inline back ref
1598  */
1599 static noinline_for_stack
1600 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1601                                  struct btrfs_root *root,
1602                                  struct btrfs_path *path,
1603                                  struct btrfs_extent_inline_ref *iref,
1604                                  u64 parent, u64 root_objectid,
1605                                  u64 owner, u64 offset, int refs_to_add,
1606                                  struct btrfs_delayed_extent_op *extent_op)
1607 {
1608         struct extent_buffer *leaf;
1609         struct btrfs_extent_item *ei;
1610         unsigned long ptr;
1611         unsigned long end;
1612         unsigned long item_offset;
1613         u64 refs;
1614         int size;
1615         int type;
1616
1617         leaf = path->nodes[0];
1618         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1619         item_offset = (unsigned long)iref - (unsigned long)ei;
1620
1621         type = extent_ref_type(parent, owner);
1622         size = btrfs_extent_inline_ref_size(type);
1623
1624         btrfs_extend_item(trans, root, path, size);
1625
1626         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1627         refs = btrfs_extent_refs(leaf, ei);
1628         refs += refs_to_add;
1629         btrfs_set_extent_refs(leaf, ei, refs);
1630         if (extent_op)
1631                 __run_delayed_extent_op(extent_op, leaf, ei);
1632
1633         ptr = (unsigned long)ei + item_offset;
1634         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1635         if (ptr < end - size)
1636                 memmove_extent_buffer(leaf, ptr + size, ptr,
1637                                       end - size - ptr);
1638
1639         iref = (struct btrfs_extent_inline_ref *)ptr;
1640         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1641         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1642                 struct btrfs_extent_data_ref *dref;
1643                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1644                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1645                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1646                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1647                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1648         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1649                 struct btrfs_shared_data_ref *sref;
1650                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1651                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1652                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1653         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1654                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1655         } else {
1656                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1657         }
1658         btrfs_mark_buffer_dirty(leaf);
1659 }
1660
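/*
 * Illustrative extent item layout after the helper above has inserted two
 * inline data refs (the tree_block_info part exists only for tree blocks):
 *
 *	[ btrfs_extent_item | tree_block_info? | inline ref 0 | inline ref 1 ]
 *
 * btrfs_extend_item() grows the item, and the tail is memmoved so the new
 * ref lands at its sorted position, matching the ordering of keyed
 * backref items.
 */
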
1661 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1662                                  struct btrfs_root *root,
1663                                  struct btrfs_path *path,
1664                                  struct btrfs_extent_inline_ref **ref_ret,
1665                                  u64 bytenr, u64 num_bytes, u64 parent,
1666                                  u64 root_objectid, u64 owner, u64 offset)
1667 {
1668         int ret;
1669
1670         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1671                                            bytenr, num_bytes, parent,
1672                                            root_objectid, owner, offset, 0);
1673         if (ret != -ENOENT)
1674                 return ret;
1675
1676         btrfs_release_path(path);
1677         *ref_ret = NULL;
1678
1679         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1680                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1681                                             root_objectid);
1682         } else {
1683                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1684                                              root_objectid, owner, offset);
1685         }
1686         return ret;
1687 }
1688
1689 /*
1690  * helper to update/remove an inline back ref
1691  */
1692 static noinline_for_stack
1693 void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1694                                   struct btrfs_root *root,
1695                                   struct btrfs_path *path,
1696                                   struct btrfs_extent_inline_ref *iref,
1697                                   int refs_to_mod,
1698                                   struct btrfs_delayed_extent_op *extent_op)
1699 {
1700         struct extent_buffer *leaf;
1701         struct btrfs_extent_item *ei;
1702         struct btrfs_extent_data_ref *dref = NULL;
1703         struct btrfs_shared_data_ref *sref = NULL;
1704         unsigned long ptr;
1705         unsigned long end;
1706         u32 item_size;
1707         int size;
1708         int type;
1709         u64 refs;
1710
1711         leaf = path->nodes[0];
1712         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1713         refs = btrfs_extent_refs(leaf, ei);
1714         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1715         refs += refs_to_mod;
1716         btrfs_set_extent_refs(leaf, ei, refs);
1717         if (extent_op)
1718                 __run_delayed_extent_op(extent_op, leaf, ei);
1719
1720         type = btrfs_extent_inline_ref_type(leaf, iref);
1721
1722         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1723                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1724                 refs = btrfs_extent_data_ref_count(leaf, dref);
1725         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1726                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1727                 refs = btrfs_shared_data_ref_count(leaf, sref);
1728         } else {
1729                 refs = 1;
1730                 BUG_ON(refs_to_mod != -1);
1731         }
1732
1733         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1734         refs += refs_to_mod;
1735
1736         if (refs > 0) {
1737                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1738                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1739                 else
1740                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1741         } else {
1742                 size =  btrfs_extent_inline_ref_size(type);
1743                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1744                 ptr = (unsigned long)iref;
1745                 end = (unsigned long)ei + item_size;
1746                 if (ptr + size < end)
1747                         memmove_extent_buffer(leaf, ptr, ptr + size,
1748                                               end - ptr - size);
1749                 item_size -= size;
1750                 btrfs_truncate_item(trans, root, path, item_size, 1);
1751         }
1752         btrfs_mark_buffer_dirty(leaf);
1753 }
1754
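/*
 * insert a new inline back ref, or bump the count of an existing one in
 * place; returns -EAGAIN (from the lookup) when the extent item has no
 * room left inline and the caller must fall back to a keyed back ref item.
 */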
1755 static noinline_for_stack
1756 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1757                                  struct btrfs_root *root,
1758                                  struct btrfs_path *path,
1759                                  u64 bytenr, u64 num_bytes, u64 parent,
1760                                  u64 root_objectid, u64 owner,
1761                                  u64 offset, int refs_to_add,
1762                                  struct btrfs_delayed_extent_op *extent_op)
1763 {
1764         struct btrfs_extent_inline_ref *iref;
1765         int ret;
1766
1767         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1768                                            bytenr, num_bytes, parent,
1769                                            root_objectid, owner, offset, 1);
1770         if (ret == 0) {
1771                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1772                 update_inline_extent_backref(trans, root, path, iref,
1773                                              refs_to_add, extent_op);
1774         } else if (ret == -ENOENT) {
1775                 setup_inline_extent_backref(trans, root, path, iref, parent,
1776                                             root_objectid, owner, offset,
1777                                             refs_to_add, extent_op);
1778                 ret = 0;
1779         }
1780         return ret;
1781 }
1782
1783 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1784                                  struct btrfs_root *root,
1785                                  struct btrfs_path *path,
1786                                  u64 bytenr, u64 parent, u64 root_objectid,
1787                                  u64 owner, u64 offset, int refs_to_add)
1788 {
1789         int ret;
1790         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1791                 BUG_ON(refs_to_add != 1);
1792                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1793                                             parent, root_objectid);
1794         } else {
1795                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1796                                              parent, root_objectid,
1797                                              owner, offset, refs_to_add);
1798         }
1799         return ret;
1800 }
1801
1802 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1803                                  struct btrfs_root *root,
1804                                  struct btrfs_path *path,
1805                                  struct btrfs_extent_inline_ref *iref,
1806                                  int refs_to_drop, int is_data)
1807 {
1808         int ret = 0;
1809
1810         BUG_ON(!is_data && refs_to_drop != 1);
1811         if (iref) {
1812                 update_inline_extent_backref(trans, root, path, iref,
1813                                              -refs_to_drop, NULL);
1814         } else if (is_data) {
1815                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1816         } else {
1817                 ret = btrfs_del_item(trans, root, path);
1818         }
1819         return ret;
1820 }
1821
1822 static int btrfs_issue_discard(struct block_device *bdev,
1823                                 u64 start, u64 len)
1824 {
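        /* blkdev_issue_discard() takes 512-byte sector units, hence >> 9 */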
1825         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1826 }
1827
1828 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1829                                 u64 num_bytes, u64 *actual_bytes)
1830 {
1831         int ret;
1832         u64 discarded_bytes = 0;
1833         struct btrfs_bio *bbio = NULL;
1834
1835
1836         /* Tell the block device(s) that the sectors can be discarded */
1837         ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1838                               bytenr, &num_bytes, &bbio, 0);
1839         /* Error condition is -ENOMEM */
1840         if (!ret) {
1841                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1842                 int i;
1843
1844
1845                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1846                         if (!stripe->dev->can_discard)
1847                                 continue;
1848
1849                         ret = btrfs_issue_discard(stripe->dev->bdev,
1850                                                   stripe->physical,
1851                                                   stripe->length);
1852                         if (!ret)
1853                                 discarded_bytes += stripe->length;
1854                         else if (ret != -EOPNOTSUPP)
1855                                 break; /* Logic errors or -ENOMEM, or -EIO, which we do not expect here */
1856
1857                         /*
1858                          * If we do get back -EOPNOTSUPP for some reason,
1859                          * ignore the return value so we don't break
1860                          * callers of discard_extent.
1861                          */
1862                         ret = 0;
1863                 }
1864                 kfree(bbio);
1865         }
1866
1867         if (actual_bytes)
1868                 *actual_bytes = discarded_bytes;
1869
1870
1871         return ret;
1872 }
1873
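/*
 * Illustrative use (a sketch in the style of the fstrim path; "start",
 * "len" and "total_trimmed" are hypothetical locals):
 *
 *	u64 trimmed = 0;
 *	ret = btrfs_discard_extent(root, start, len, &trimmed);
 *	if (!ret)
 *		total_trimmed += trimmed;
 */
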
1874 /* Can return -ENOMEM */
1875 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1876                          struct btrfs_root *root,
1877                          u64 bytenr, u64 num_bytes, u64 parent,
1878                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1879 {
1880         int ret;
1881         struct btrfs_fs_info *fs_info = root->fs_info;
1882
1883         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1884                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1885
1886         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1887                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1888                                         num_bytes,
1889                                         parent, root_objectid, (int)owner,
1890                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1891         } else {
1892                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1893                                         num_bytes,
1894                                         parent, root_objectid, owner, offset,
1895                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1896         }
1897         return ret;
1898 }
1899
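/*
 * Illustrative call (a sketch of bumping refs on a child block when a node
 * is COWed; "buf", "i", "blocksize" and "level" are hypothetical locals,
 * and the nonzero parent assumes a shared/full-backref block):
 *
 *	bytenr = btrfs_node_blockptr(buf, i);
 *	ret = btrfs_inc_extent_ref(trans, root, bytenr, blocksize,
 *				   buf->start, root->root_key.objectid,
 *				   level - 1, 0, 1);
 */

/*
 * apply a delayed ref increment to the extent tree: try to add the backref
 * inline first, and fall back to a separate keyed backref item when the
 * inline lookup returns -EAGAIN.
 */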
1900 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1901                                   struct btrfs_root *root,
1902                                   u64 bytenr, u64 num_bytes,
1903                                   u64 parent, u64 root_objectid,
1904                                   u64 owner, u64 offset, int refs_to_add,
1905                                   struct btrfs_delayed_extent_op *extent_op)
1906 {
1907         struct btrfs_path *path;
1908         struct extent_buffer *leaf;
1909         struct btrfs_extent_item *item;
1910         u64 refs;
1911         int ret;
1912         int err = 0;
1913
1914         path = btrfs_alloc_path();
1915         if (!path)
1916                 return -ENOMEM;
1917
1918         path->reada = 1;
1919         path->leave_spinning = 1;
1920         /* this will setup the path even if it fails to insert the back ref */
1921         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1922                                            path, bytenr, num_bytes, parent,
1923                                            root_objectid, owner, offset,
1924                                            refs_to_add, extent_op);
1925         if (ret == 0)
1926                 goto out;
1927
1928         if (ret != -EAGAIN) {
1929                 err = ret;
1930                 goto out;
1931         }
1932
1933         leaf = path->nodes[0];
1934         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1935         refs = btrfs_extent_refs(leaf, item);
1936         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1937         if (extent_op)
1938                 __run_delayed_extent_op(extent_op, leaf, item);
1939
1940         btrfs_mark_buffer_dirty(leaf);
1941         btrfs_release_path(path);
1942
1943         path->reada = 1;
1944         path->leave_spinning = 1;
1945
1946         /* now insert the actual backref */
1947         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1948                                     path, bytenr, parent, root_objectid,
1949                                     owner, offset, refs_to_add);
1950         if (ret)
1951                 btrfs_abort_transaction(trans, root, ret);
1952 out:
1953         btrfs_free_path(path);
1954         return err;
1955 }
1956
1957 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1958                                 struct btrfs_root *root,
1959                                 struct btrfs_delayed_ref_node *node,
1960                                 struct btrfs_delayed_extent_op *extent_op,
1961                                 int insert_reserved)
1962 {
1963         int ret = 0;
1964         struct btrfs_delayed_data_ref *ref;
1965         struct btrfs_key ins;
1966         u64 parent = 0;
1967         u64 ref_root = 0;
1968         u64 flags = 0;
1969
1970         ins.objectid = node->bytenr;
1971         ins.offset = node->num_bytes;
1972         ins.type = BTRFS_EXTENT_ITEM_KEY;
1973
1974         ref = btrfs_delayed_node_to_data_ref(node);
1975         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1976                 parent = ref->parent;
1977         else
1978                 ref_root = ref->root;
1979
1980         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1981                 if (extent_op) {
1982                         BUG_ON(extent_op->update_key);
1983                         flags |= extent_op->flags_to_set;
1984                 }
1985                 ret = alloc_reserved_file_extent(trans, root,
1986                                                  parent, ref_root, flags,
1987                                                  ref->objectid, ref->offset,
1988                                                  &ins, node->ref_mod);
1989         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1990                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1991                                              node->num_bytes, parent,
1992                                              ref_root, ref->objectid,
1993                                              ref->offset, node->ref_mod,
1994                                              extent_op);
1995         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1996                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1997                                           node->num_bytes, parent,
1998                                           ref_root, ref->objectid,
1999                                           ref->offset, node->ref_mod,
2000                                           extent_op);
2001         } else {
2002                 BUG();
2003         }
2004         return ret;
2005 }
2006
2007 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2008                                     struct extent_buffer *leaf,
2009                                     struct btrfs_extent_item *ei)
2010 {
2011         u64 flags = btrfs_extent_flags(leaf, ei);
2012         if (extent_op->update_flags) {
2013                 flags |= extent_op->flags_to_set;
2014                 btrfs_set_extent_flags(leaf, ei, flags);
2015         }
2016
2017         if (extent_op->update_key) {
2018                 struct btrfs_tree_block_info *bi;
2019                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2020                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2021                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2022         }
2023 }
2024
2025 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2026                                  struct btrfs_root *root,
2027                                  struct btrfs_delayed_ref_node *node,
2028                                  struct btrfs_delayed_extent_op *extent_op)
2029 {
2030         struct btrfs_key key;
2031         struct btrfs_path *path;
2032         struct btrfs_extent_item *ei;
2033         struct extent_buffer *leaf;
2034         u32 item_size;
2035         int ret;
2036         int err = 0;
2037
2038         if (trans->aborted)
2039                 return 0;
2040
2041         path = btrfs_alloc_path();
2042         if (!path)
2043                 return -ENOMEM;
2044
2045         key.objectid = node->bytenr;
2046         key.type = BTRFS_EXTENT_ITEM_KEY;
2047         key.offset = node->num_bytes;
2048
2049         path->reada = 1;
2050         path->leave_spinning = 1;
2051         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2052                                 path, 0, 1);
2053         if (ret < 0) {
2054                 err = ret;
2055                 goto out;
2056         }
2057         if (ret > 0) {
2058                 err = -EIO;
2059                 goto out;
2060         }
2061
2062         leaf = path->nodes[0];
2063         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2064 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2065         if (item_size < sizeof(*ei)) {
2066                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2067                                              path, (u64)-1, 0);
2068                 if (ret < 0) {
2069                         err = ret;
2070                         goto out;
2071                 }
2072                 leaf = path->nodes[0];
2073                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2074         }
2075 #endif
2076         BUG_ON(item_size < sizeof(*ei));
2077         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2078         __run_delayed_extent_op(extent_op, leaf, ei);
2079
2080         btrfs_mark_buffer_dirty(leaf);
2081 out:
2082         btrfs_free_path(path);
2083         return err;
2084 }
2085
2086 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2087                                 struct btrfs_root *root,
2088                                 struct btrfs_delayed_ref_node *node,
2089                                 struct btrfs_delayed_extent_op *extent_op,
2090                                 int insert_reserved)
2091 {
2092         int ret = 0;
2093         struct btrfs_delayed_tree_ref *ref;
2094         struct btrfs_key ins;
2095         u64 parent = 0;
2096         u64 ref_root = 0;
2097
2098         ins.objectid = node->bytenr;
2099         ins.offset = node->num_bytes;
2100         ins.type = BTRFS_EXTENT_ITEM_KEY;
2101
2102         ref = btrfs_delayed_node_to_tree_ref(node);
2103         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2104                 parent = ref->parent;
2105         else
2106                 ref_root = ref->root;
2107
2108         BUG_ON(node->ref_mod != 1);
2109         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2110                 BUG_ON(!extent_op || !extent_op->update_flags ||
2111                        !extent_op->update_key);
2112                 ret = alloc_reserved_tree_block(trans, root,
2113                                                 parent, ref_root,
2114                                                 extent_op->flags_to_set,
2115                                                 &extent_op->key,
2116                                                 ref->level, &ins);
2117         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2118                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2119                                              node->num_bytes, parent, ref_root,
2120                                              ref->level, 0, 1, extent_op);
2121         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2122                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2123                                           node->num_bytes, parent, ref_root,
2124                                           ref->level, 0, 1, extent_op);
2125         } else {
2126                 BUG();
2127         }
2128         return ret;
2129 }
2130
2131 /* helper function to actually process a single delayed ref entry */
2132 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2133                                struct btrfs_root *root,
2134                                struct btrfs_delayed_ref_node *node,
2135                                struct btrfs_delayed_extent_op *extent_op,
2136                                int insert_reserved)
2137 {
2138         int ret = 0;
2139
2140         if (trans->aborted)
2141                 return 0;
2142
2143         if (btrfs_delayed_ref_is_head(node)) {
2144                 struct btrfs_delayed_ref_head *head;
2145                 /*
2146                  * we've hit the end of the chain and we were supposed
2147                  * to insert this extent into the tree.  But it got
2148                  * deleted before we ever needed to insert it, so all
2149                  * we have to do is clean up the accounting.
2150                  */
2151                 BUG_ON(extent_op);
2152                 head = btrfs_delayed_node_to_head(node);
2153                 if (insert_reserved) {
2154                         btrfs_pin_extent(root, node->bytenr,
2155                                          node->num_bytes, 1);
2156                         if (head->is_data) {
2157                                 ret = btrfs_del_csums(trans, root,
2158                                                       node->bytenr,
2159                                                       node->num_bytes);
2160                         }
2161                 }
2162                 mutex_unlock(&head->mutex);
2163                 return ret;
2164         }
2165
2166         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2167             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2168                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2169                                            insert_reserved);
2170         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2171                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2172                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2173                                            insert_reserved);
2174         else
2175                 BUG();
2176         return ret;
2177 }
2178
2179 static noinline struct btrfs_delayed_ref_node *
2180 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2181 {
2182         struct rb_node *node;
2183         struct btrfs_delayed_ref_node *ref;
2184         int action = BTRFS_ADD_DELAYED_REF;
2185 again:
2186         /*
2187          * select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2188          * This prevents the ref count from going down to zero while
2189          * there are still pending delayed refs.
2190          */
2191         node = rb_prev(&head->node.rb_node);
2192         while (1) {
2193                 if (!node)
2194                         break;
2195                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2196                                 rb_node);
2197                 if (ref->bytenr != head->node.bytenr)
2198                         break;
2199                 if (ref->action == action)
2200                         return ref;
2201                 node = rb_prev(node);
2202         }
2203         if (action == BTRFS_ADD_DELAYED_REF) {
2204                 action = BTRFS_DROP_DELAYED_REF;
2205                 goto again;
2206         }
2207         return NULL;
2208 }
2209
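/*
 * Worked example (illustrative): with pending refs [DROP, ADD, DROP]
 * queued behind one head, the ADD is handed out first and the DROPs only
 * on later calls, so the ref count never transiently reaches zero while
 * an addition is still pending.
 */
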
2210 /*
2211  * Returns the number of refs processed, or a negative errno (-ENOMEM or
2212  * -EIO) on failure, after which the caller aborts the transaction.
2213  */
2214 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2215                                        struct btrfs_root *root,
2216                                        struct list_head *cluster)
2217 {
2218         struct btrfs_delayed_ref_root *delayed_refs;
2219         struct btrfs_delayed_ref_node *ref;
2220         struct btrfs_delayed_ref_head *locked_ref = NULL;
2221         struct btrfs_delayed_extent_op *extent_op;
2222         struct btrfs_fs_info *fs_info = root->fs_info;
2223         int ret;
2224         int count = 0;
2225         int must_insert_reserved = 0;
2226
2227         delayed_refs = &trans->transaction->delayed_refs;
2228         while (1) {
2229                 if (!locked_ref) {
2230                         /* pick a new head ref from the cluster list */
2231                         if (list_empty(cluster))
2232                                 break;
2233
2234                         locked_ref = list_entry(cluster->next,
2235                                      struct btrfs_delayed_ref_head, cluster);
2236
2237                         /* grab the lock that says we are going to process
2238                          * all the refs for this head */
2239                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2240
2241                         /*
2242                          * we may have dropped the spin lock to get the head
2243                          * mutex lock, and that might have given someone else
2244                          * time to free the head.  If that's true, it has been
2245                          * removed from our list and we can move on.
2246                          */
2247                         if (ret == -EAGAIN) {
2248                                 locked_ref = NULL;
2249                                 count++;
2250                                 continue;
2251                         }
2252                 }
2253
2254                 /*
2255                  * locked_ref is the head node, so we have to go one
2256                  * node back for any delayed ref updates
2257                  */
2258                 ref = select_delayed_ref(locked_ref);
2259
2260                 if (ref && ref->seq &&
2261                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2262                         /*
2263                          * there are still refs with lower seq numbers in the
2264                          * process of being added. Don't run this ref yet.
2265                          */
2266                         list_del_init(&locked_ref->cluster);
2267                         mutex_unlock(&locked_ref->mutex);
2268                         locked_ref = NULL;
2269                         delayed_refs->num_heads_ready++;
2270                         spin_unlock(&delayed_refs->lock);
2271                         cond_resched();
2272                         spin_lock(&delayed_refs->lock);
2273                         continue;
2274                 }
2275
2276                 /*
2277                  * record the must insert reserved flag before we
2278                  * drop the spin lock.
2279                  */
2280                 must_insert_reserved = locked_ref->must_insert_reserved;
2281                 locked_ref->must_insert_reserved = 0;
2282
2283                 extent_op = locked_ref->extent_op;
2284                 locked_ref->extent_op = NULL;
2285
2286                 if (!ref) {
2287                         /* All delayed refs have been processed; go ahead
2288                          * and send the head node to run_one_delayed_ref,
2289                          * so that any accounting fixes can happen.
2290                          */
2291                         ref = &locked_ref->node;
2292
2293                         if (extent_op && must_insert_reserved) {
2294                                 kfree(extent_op);
2295                                 extent_op = NULL;
2296                         }
2297
2298                         if (extent_op) {
2299                                 spin_unlock(&delayed_refs->lock);
2300
2301                                 ret = run_delayed_extent_op(trans, root,
2302                                                             ref, extent_op);
2303                                 kfree(extent_op);
2304
2305                                 if (ret) {
2306                                         printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
2307                                         spin_lock(&delayed_refs->lock);
2308                                         return ret;
2309                                 }
2310
2311                                 goto next;
2312                         }
2313
2314                         list_del_init(&locked_ref->cluster);
2315                         locked_ref = NULL;
2316                 }
2317
2318                 ref->in_tree = 0;
2319                 rb_erase(&ref->rb_node, &delayed_refs->root);
2320                 delayed_refs->num_entries--;
2321                 /*
2322                  * we modified num_entries, but as we're currently running
2323                  * delayed refs, skip
2324                  *     wake_up(&delayed_refs->seq_wait);
2325                  * here.
2326                  */
2327                 spin_unlock(&delayed_refs->lock);
2328
2329                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2330                                           must_insert_reserved);
2331
2332                 btrfs_put_delayed_ref(ref);
2333                 kfree(extent_op);
2334                 count++;
2335
2336                 if (ret) {
2337                         printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
2338                         spin_lock(&delayed_refs->lock);
2339                         return ret;
2340                 }
2341
2342 next:
2343                 do_chunk_alloc(trans, fs_info->extent_root,
2344                                2 * 1024 * 1024,
2345                                btrfs_get_alloc_profile(root, 0),
2346                                CHUNK_ALLOC_NO_FORCE);
2347                 cond_resched();
2348                 spin_lock(&delayed_refs->lock);
2349         }
2350         return count;
2351 }
2352
2353 static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
2354                                struct btrfs_delayed_ref_root *delayed_refs,
2355                                unsigned long num_refs,
2356                                struct list_head *first_seq)
2357 {
2358         spin_unlock(&delayed_refs->lock);
2359         pr_debug("waiting for more refs (num %lu, first %p)\n",
2360                  num_refs, first_seq);
2361         wait_event(fs_info->tree_mod_seq_wait,
2362                    num_refs != delayed_refs->num_entries ||
2363                    fs_info->tree_mod_seq_list.next != first_seq);
2364         pr_debug("done waiting for more refs (num %lu, first %p)\n",
2365                  delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
2366         spin_lock(&delayed_refs->lock);
2367 }
2368
2369 #ifdef SCRAMBLE_DELAYED_REFS
2370 /*
2371  * Normally delayed refs get processed in ascending bytenr order, which
2372  * usually matches the order they were added. To expose dependencies on
2373  * this order, we start processing the tree in the middle instead.
2374  */
2375 static u64 find_middle(struct rb_root *root)
2376 {
2377         struct rb_node *n = root->rb_node;
2378         struct btrfs_delayed_ref_node *entry;
2379         int alt = 1;
2380         u64 middle;
2381         u64 first = 0, last = 0;
2382
2383         n = rb_first(root);
2384         if (n) {
2385                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2386                 first = entry->bytenr;
2387         }
2388         n = rb_last(root);
2389         if (n) {
2390                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2391                 last = entry->bytenr;
2392         }
2393         n = root->rb_node;
2394
2395         while (n) {
2396                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2397                 WARN_ON(!entry->in_tree);
2398
2399                 middle = entry->bytenr;
2400
2401                 if (alt)
2402                         n = n->rb_left;
2403                 else
2404                         n = n->rb_right;
2405
2406                 alt = 1 - alt;
2407         }
2408         return middle;
2409 }
2410 #endif
2411
2412 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2413                                          struct btrfs_fs_info *fs_info)
2414 {
2415         struct qgroup_update *qgroup_update;
2416         int ret = 0;
2417
2418         if (list_empty(&trans->qgroup_ref_list) !=
2419             !trans->delayed_ref_elem.seq) {
2420                 /* list without seq or seq without list */
2421                 printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
2422                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2423                         trans->delayed_ref_elem.seq);
2424                 BUG();
2425         }
2426
2427         if (!trans->delayed_ref_elem.seq)
2428                 return 0;
2429
2430         while (!list_empty(&trans->qgroup_ref_list)) {
2431                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2432                                                  struct qgroup_update, list);
2433                 list_del(&qgroup_update->list);
2434                 if (!ret)
2435                         ret = btrfs_qgroup_account_ref(
2436                                         trans, fs_info, qgroup_update->node,
2437                                         qgroup_update->extent_op);
2438                 kfree(qgroup_update);
2439         }
2440
2441         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2442
2443         return ret;
2444 }
2445
2446 /*
2447  * this starts processing the delayed reference count updates and
2448  * extent insertions we have queued up so far.  count can be
2449  * 0, which means to process everything in the tree at the start
2450  * of the run (but not newly added entries), or it can be some target
2451  * number you'd like to process.
2452  *
2453  * Returns 0 on success or if called with an aborted transaction.
2454  * Returns <0 on error and aborts the transaction.
2455  */
2456 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2457                            struct btrfs_root *root, unsigned long count)
2458 {
2459         struct rb_node *node;
2460         struct btrfs_delayed_ref_root *delayed_refs;
2461         struct btrfs_delayed_ref_node *ref;
2462         struct list_head cluster;
2463         struct list_head *first_seq = NULL;
2464         int ret;
2465         u64 delayed_start;
2466         int run_all = count == (unsigned long)-1;
2467         int run_most = 0;
2468         unsigned long num_refs = 0;
2469         int consider_waiting;
2470
2471         /* We'll clean this up in btrfs_cleanup_transaction */
2472         if (trans->aborted)
2473                 return 0;
2474
2475         if (root == root->fs_info->extent_root)
2476                 root = root->fs_info->tree_root;
2477
2478         do_chunk_alloc(trans, root->fs_info->extent_root,
2479                        2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
2480                        CHUNK_ALLOC_NO_FORCE);
2481
2482         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2483
2484         delayed_refs = &trans->transaction->delayed_refs;
2485         INIT_LIST_HEAD(&cluster);
2486 again:
2487         consider_waiting = 0;
2488         spin_lock(&delayed_refs->lock);
2489
2490 #ifdef SCRAMBLE_DELAYED_REFS
2491         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2492 #endif
2493
2494         if (count == 0) {
2495                 count = delayed_refs->num_entries * 2;
2496                 run_most = 1;
2497         }
2498         while (1) {
2499                 if (!(run_all || run_most) &&
2500                     delayed_refs->num_heads_ready < 64)
2501                         break;
2502
2503                 /*
2504                  * go find something we can process in the rbtree.  We start at
2505                  * the beginning of the tree, and then build a cluster
2506                  * of refs to process starting at the first one we are able to
2507                  * lock.
2508                  */
2509                 delayed_start = delayed_refs->run_delayed_start;
2510                 ret = btrfs_find_ref_cluster(trans, &cluster,
2511                                              delayed_refs->run_delayed_start);
2512                 if (ret)
2513                         break;
2514
2515                 if (delayed_start >= delayed_refs->run_delayed_start) {
2516                         if (consider_waiting == 0) {
2517                                 /*
2518                                  * btrfs_find_ref_cluster looped. Let's do one
2519                                  * more cycle. If we don't run any delayed refs
2520                                  * during that cycle (because all of them are
2521                                  * blocked) and if the number of refs doesn't
2522                                  * change, we avoid busy waiting.
2523                                  */
2524                                 consider_waiting = 1;
2525                                 num_refs = delayed_refs->num_entries;
2526                                 first_seq = root->fs_info->tree_mod_seq_list.next;
2527                         } else {
2528                                 wait_for_more_refs(root->fs_info, delayed_refs,
2529                                                    num_refs, first_seq);
2530                                 /*
2531                                  * after waiting, things have changed: we
2532                                  * dropped the lock and someone else might have
2533                                  * run some refs, built new clusters and so on.
2534                                  * Therefore, we restart staleness detection.
2535                                  */
2536                                 consider_waiting = 0;
2537                         }
2538                 }
2539
2540                 ret = run_clustered_refs(trans, root, &cluster);
2541                 if (ret < 0) {
2542                         spin_unlock(&delayed_refs->lock);
2543                         btrfs_abort_transaction(trans, root, ret);
2544                         return ret;
2545                 }
2546
2547                 count -= min_t(unsigned long, ret, count);
2548
2549                 if (count == 0)
2550                         break;
2551
2552                 if (ret || delayed_refs->run_delayed_start == 0) {
2553                         /* refs were run, let's reset staleness detection */
2554                         consider_waiting = 0;
2555                 }
2556         }
2557
2558         if (run_all) {
2559                 node = rb_first(&delayed_refs->root);
2560                 if (!node)
2561                         goto out;
2562                 count = (unsigned long)-1;
2563
2564                 while (node) {
2565                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2566                                        rb_node);
2567                         if (btrfs_delayed_ref_is_head(ref)) {
2568                                 struct btrfs_delayed_ref_head *head;
2569
2570                                 head = btrfs_delayed_node_to_head(ref);
2571                                 atomic_inc(&ref->refs);
2572
2573                                 spin_unlock(&delayed_refs->lock);
2574                                 /*
2575                                  * Mutex was contended, block until it's
2576                                  * released and try again
2577                                  */
2578                                 mutex_lock(&head->mutex);
2579                                 mutex_unlock(&head->mutex);
2580
2581                                 btrfs_put_delayed_ref(ref);
2582                                 cond_resched();
2583                                 goto again;
2584                         }
2585                         node = rb_next(node);
2586                 }
2587                 spin_unlock(&delayed_refs->lock);
2588                 schedule_timeout(1);
2589                 goto again;
2590         }
2591 out:
2592         spin_unlock(&delayed_refs->lock);
2593         assert_qgroups_uptodate(trans);
2594         return 0;
2595 }
2596
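/*
 * Illustrative use of the above: transaction commit flushes everything
 * queued so far by passing (unsigned long)-1, while throttling callers
 * pass a small target count instead:
 *
 *	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *	if (ret)
 *		return ret;	// < 0: the transaction was already aborted
 */
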
2597 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2598                                 struct btrfs_root *root,
2599                                 u64 bytenr, u64 num_bytes, u64 flags,
2600                                 int is_data)
2601 {
2602         struct btrfs_delayed_extent_op *extent_op;
2603         int ret;
2604
2605         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2606         if (!extent_op)
2607                 return -ENOMEM;
2608
2609         extent_op->flags_to_set = flags;
2610         extent_op->update_flags = 1;
2611         extent_op->update_key = 0;
2612         extent_op->is_data = is_data ? 1 : 0;
2613
2614         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2615                                           num_bytes, extent_op);
2616         if (ret)
2617                 kfree(extent_op);
2618         return ret;
2619 }
2620
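/*
 * Illustrative use of the above (a sketch, assuming the snapshot-drop
 * style of caller that converts a tree block to a full backref; "buf" is
 * a hypothetical extent buffer):
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
 */

/*
 * check the delayed refs for a pending data ref to this extent from a root
 * other than ours: returns 0 when the only delayed ref is our own, 1 when
 * another ref exists (or might), -ENOENT when there is no delayed head,
 * and -EAGAIN when the head mutex was contended and the caller must retry.
 */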
2621 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2622                                       struct btrfs_root *root,
2623                                       struct btrfs_path *path,
2624                                       u64 objectid, u64 offset, u64 bytenr)
2625 {
2626         struct btrfs_delayed_ref_head *head;
2627         struct btrfs_delayed_ref_node *ref;
2628         struct btrfs_delayed_data_ref *data_ref;
2629         struct btrfs_delayed_ref_root *delayed_refs;
2630         struct rb_node *node;
2631         int ret = 0;
2632
2633         ret = -ENOENT;
2634         delayed_refs = &trans->transaction->delayed_refs;
2635         spin_lock(&delayed_refs->lock);
2636         head = btrfs_find_delayed_ref_head(trans, bytenr);
2637         if (!head)
2638                 goto out;
2639
2640         if (!mutex_trylock(&head->mutex)) {
2641                 atomic_inc(&head->node.refs);
2642                 spin_unlock(&delayed_refs->lock);
2643
2644                 btrfs_release_path(path);
2645
2646                 /*
2647                  * Mutex was contended, block until it's released and let
2648                  * caller try again
2649                  */
2650                 mutex_lock(&head->mutex);
2651                 mutex_unlock(&head->mutex);
2652                 btrfs_put_delayed_ref(&head->node);
2653                 return -EAGAIN;
2654         }
2655
2656         node = rb_prev(&head->node.rb_node);
2657         if (!node)
2658                 goto out_unlock;
2659
2660         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2661
2662         if (ref->bytenr != bytenr)
2663                 goto out_unlock;
2664
2665         ret = 1;
2666         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2667                 goto out_unlock;
2668
2669         data_ref = btrfs_delayed_node_to_data_ref(ref);
2670
2671         node = rb_prev(node);
2672         if (node) {
2673                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2674                 if (ref->bytenr == bytenr)
2675                         goto out_unlock;
2676         }
2677
2678         if (data_ref->root != root->root_key.objectid ||
2679             data_ref->objectid != objectid || data_ref->offset != offset)
2680                 goto out_unlock;
2681
2682         ret = 0;
2683 out_unlock:
2684         mutex_unlock(&head->mutex);
2685 out:
2686         spin_unlock(&delayed_refs->lock);
2687         return ret;
2688 }
2689
2690 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2691                                         struct btrfs_root *root,
2692                                         struct btrfs_path *path,
2693                                         u64 objectid, u64 offset, u64 bytenr)
2694 {
2695         struct btrfs_root *extent_root = root->fs_info->extent_root;
2696         struct extent_buffer *leaf;
2697         struct btrfs_extent_data_ref *ref;
2698         struct btrfs_extent_inline_ref *iref;
2699         struct btrfs_extent_item *ei;
2700         struct btrfs_key key;
2701         u32 item_size;
2702         int ret;
2703
2704         key.objectid = bytenr;
2705         key.offset = (u64)-1;
2706         key.type = BTRFS_EXTENT_ITEM_KEY;
2707
2708         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2709         if (ret < 0)
2710                 goto out;
2711         BUG_ON(ret == 0); /* Corruption */
2712
2713         ret = -ENOENT;
2714         if (path->slots[0] == 0)
2715                 goto out;
2716
2717         path->slots[0]--;
2718         leaf = path->nodes[0];
2719         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2720
2721         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2722                 goto out;
2723
2724         ret = 1;
2725         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2726 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2727         if (item_size < sizeof(*ei)) {
2728                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2729                 goto out;
2730         }
2731 #endif
2732         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2733
2734         if (item_size != sizeof(*ei) +
2735             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2736                 goto out;
2737
2738         if (btrfs_extent_generation(leaf, ei) <=
2739             btrfs_root_last_snapshot(&root->root_item))
2740                 goto out;
2741
2742         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2743         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2744             BTRFS_EXTENT_DATA_REF_KEY)
2745                 goto out;
2746
2747         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2748         if (btrfs_extent_refs(leaf, ei) !=
2749             btrfs_extent_data_ref_count(leaf, ref) ||
2750             btrfs_extent_data_ref_root(leaf, ref) !=
2751             root->root_key.objectid ||
2752             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2753             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2754                 goto out;
2755
2756         ret = 0;
2757 out:
2758         return ret;
2759 }
2760
2761 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2762                           struct btrfs_root *root,
2763                           u64 objectid, u64 offset, u64 bytenr)
2764 {
2765         struct btrfs_path *path;
2766         int ret;
2767         int ret2;
2768
2769         path = btrfs_alloc_path();
2770         if (!path)
2771                 return -ENOMEM;
2772
2773         do {
2774                 ret = check_committed_ref(trans, root, path, objectid,
2775                                           offset, bytenr);
2776                 if (ret && ret != -ENOENT)
2777                         goto out;
2778
2779                 ret2 = check_delayed_ref(trans, root, path, objectid,
2780                                          offset, bytenr);
2781         } while (ret2 == -EAGAIN);
2782
2783         if (ret2 && ret2 != -ENOENT) {
2784                 ret = ret2;
2785                 goto out;
2786         }
2787
2788         if (ret != -ENOENT || ret2 != -ENOENT)
2789                 ret = 0;
2790 out:
2791         btrfs_free_path(path);
2792         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2793                 WARN_ON(ret > 0);
2794         return ret;
2795 }
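
/*
 * Caller's-eye summary (a sketch of the convention above, not new
 * behavior): 0 means no cross reference was found, i.e. the extent is
 * exclusively owned by this root, while 1 or a negative error means
 * the caller must assume the extent is shared.  The nodatacow write
 * path uses it roughly like (must_cow is a hypothetical label):
 *
 *	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
 *				  offset, disk_bytenr))
 *		goto must_cow;
 */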
2796
2797 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2798                            struct btrfs_root *root,
2799                            struct extent_buffer *buf,
2800                            int full_backref, int inc, int for_cow)
2801 {
2802         u64 bytenr;
2803         u64 num_bytes;
2804         u64 parent;
2805         u64 ref_root;
2806         u32 nritems;
2807         struct btrfs_key key;
2808         struct btrfs_file_extent_item *fi;
2809         int i;
2810         int level;
2811         int ret = 0;
2812         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2813                             u64, u64, u64, u64, u64, u64, int);
2814
2815         ref_root = btrfs_header_owner(buf);
2816         nritems = btrfs_header_nritems(buf);
2817         level = btrfs_header_level(buf);
2818
2819         if (!root->ref_cows && level == 0)
2820                 return 0;
2821
2822         if (inc)
2823                 process_func = btrfs_inc_extent_ref;
2824         else
2825                 process_func = btrfs_free_extent;
2826
2827         if (full_backref)
2828                 parent = buf->start;
2829         else
2830                 parent = 0;
2831
2832         for (i = 0; i < nritems; i++) {
2833                 if (level == 0) {
2834                         btrfs_item_key_to_cpu(buf, &key, i);
2835                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2836                                 continue;
2837                         fi = btrfs_item_ptr(buf, i,
2838                                             struct btrfs_file_extent_item);
2839                         if (btrfs_file_extent_type(buf, fi) ==
2840                             BTRFS_FILE_EXTENT_INLINE)
2841                                 continue;
2842                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2843                         if (bytenr == 0)
2844                                 continue;
2845
2846                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2847                         key.offset -= btrfs_file_extent_offset(buf, fi);
2848                         ret = process_func(trans, root, bytenr, num_bytes,
2849                                            parent, ref_root, key.objectid,
2850                                            key.offset, for_cow);
2851                         if (ret)
2852                                 goto fail;
2853                 } else {
2854                         bytenr = btrfs_node_blockptr(buf, i);
2855                         num_bytes = btrfs_level_size(root, level - 1);
2856                         ret = process_func(trans, root, bytenr, num_bytes,
2857                                            parent, ref_root, level - 1, 0,
2858                                            for_cow);
2859                         if (ret)
2860                                 goto fail;
2861                 }
2862         }
2863         return 0;
2864 fail:
2865         return ret;
2866 }
2867
2868 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2869                   struct extent_buffer *buf, int full_backref, int for_cow)
2870 {
2871         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2872 }
2873
2874 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2875                   struct extent_buffer *buf, int full_backref, int for_cow)
2876 {
2877         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2878 }
2879
2880 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2881                                  struct btrfs_root *root,
2882                                  struct btrfs_path *path,
2883                                  struct btrfs_block_group_cache *cache)
2884 {
2885         int ret;
2886         struct btrfs_root *extent_root = root->fs_info->extent_root;
2887         unsigned long bi;
2888         struct extent_buffer *leaf;
2889
2890         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2891         if (ret < 0)
2892                 goto fail;
2893         BUG_ON(ret); /* Corruption */
2894
2895         leaf = path->nodes[0];
2896         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2897         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2898         btrfs_mark_buffer_dirty(leaf);
2899         btrfs_release_path(path);
2900 fail:
2901         if (ret) {
2902                 btrfs_abort_transaction(trans, root, ret);
2903                 return ret;
2904         }
2905         return 0;
2906
2907 }
2908
2909 static struct btrfs_block_group_cache *
2910 next_block_group(struct btrfs_root *root,
2911                  struct btrfs_block_group_cache *cache)
2912 {
2913         struct rb_node *node;
2914         spin_lock(&root->fs_info->block_group_cache_lock);
2915         node = rb_next(&cache->cache_node);
2916         btrfs_put_block_group(cache);
2917         if (node) {
2918                 cache = rb_entry(node, struct btrfs_block_group_cache,
2919                                  cache_node);
2920                 btrfs_get_block_group(cache);
2921         } else
2922                 cache = NULL;
2923         spin_unlock(&root->fs_info->block_group_cache_lock);
2924         return cache;
2925 }
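
/*
 * Usage sketch: next_block_group() drops the reference held on the
 * cache passed in and returns the next group with a fresh reference
 * (or NULL at the end of the tree), so a simple forward scan needs no
 * explicit put inside the loop:
 *
 *	cache = btrfs_lookup_first_block_group(root->fs_info, 0);
 *	while (cache) {
 *		... inspect cache ...
 *		cache = next_block_group(root, cache);
 *	}
 *
 * A caller that breaks out early, as the loops below do, still owns a
 * reference on the current cache and must btrfs_put_block_group() it.
 */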
2926
2927 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2928                             struct btrfs_trans_handle *trans,
2929                             struct btrfs_path *path)
2930 {
2931         struct btrfs_root *root = block_group->fs_info->tree_root;
2932         struct inode *inode = NULL;
2933         u64 alloc_hint = 0;
2934         int dcs = BTRFS_DC_ERROR;
2935         int num_pages = 0;
2936         int retries = 0;
2937         int ret = 0;
2938
2939         /*
2940          * If this block group is smaller than 100 megs, don't bother caching the
2941          * block group.
2942          */
2943         if (block_group->key.offset < (100 * 1024 * 1024)) {
2944                 spin_lock(&block_group->lock);
2945                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2946                 spin_unlock(&block_group->lock);
2947                 return 0;
2948         }
2949
2950 again:
2951         inode = lookup_free_space_inode(root, block_group, path);
2952         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2953                 ret = PTR_ERR(inode);
2954                 btrfs_release_path(path);
2955                 goto out;
2956         }
2957
2958         if (IS_ERR(inode)) {
2959                 BUG_ON(retries);
2960                 retries++;
2961
2962                 if (block_group->ro)
2963                         goto out_free;
2964
2965                 ret = create_free_space_inode(root, trans, block_group, path);
2966                 if (ret)
2967                         goto out_free;
2968                 goto again;
2969         }
2970
2971         /* We've already set up this transaction, go ahead and exit */
2972         if (block_group->cache_generation == trans->transid &&
2973             i_size_read(inode)) {
2974                 dcs = BTRFS_DC_SETUP;
2975                 goto out_put;
2976         }
2977
2978         /*
2979          * We want to set the generation to 0, that way if anything goes wrong
2980          * from here on out we know not to trust this cache when we load up next
2981          * time.
2982          */
2983         BTRFS_I(inode)->generation = 0;
2984         ret = btrfs_update_inode(trans, root, inode);
2985         WARN_ON(ret);
2986
2987         if (i_size_read(inode) > 0) {
2988                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2989                                                       inode);
2990                 if (ret)
2991                         goto out_put;
2992         }
2993
2994         spin_lock(&block_group->lock);
2995         if (block_group->cached != BTRFS_CACHE_FINISHED) {
2996                 /* We're not cached, don't bother trying to write stuff out */
2997                 dcs = BTRFS_DC_WRITTEN;
2998                 spin_unlock(&block_group->lock);
2999                 goto out_put;
3000         }
3001         spin_unlock(&block_group->lock);
3002
3003         num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
3004         if (!num_pages)
3005                 num_pages = 1;
3006
3007         /*
3008          * Just to make absolutely sure we have enough space, we're going to
3009          * preallocate 16 pages worth of space for each block group.  In
3010          * practice we ought to use at most 8, but we need extra space so we can
3011          * add our header and have a terminator between the extents and the
3012          * bitmaps.
3013          */
3014         num_pages *= 16;
3015         num_pages *= PAGE_CACHE_SIZE;
3016
3017         ret = btrfs_check_data_free_space(inode, num_pages);
3018         if (ret)
3019                 goto out_put;
3020
3021         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3022                                               num_pages, num_pages,
3023                                               &alloc_hint);
3024         if (!ret)
3025                 dcs = BTRFS_DC_SETUP;
3026         btrfs_free_reserved_data_space(inode, num_pages);
3027
3028 out_put:
3029         iput(inode);
3030 out_free:
3031         btrfs_release_path(path);
3032 out:
3033         spin_lock(&block_group->lock);
3034         if (!ret && dcs == BTRFS_DC_SETUP)
3035                 block_group->cache_generation = trans->transid;
3036         block_group->disk_cache_state = dcs;
3037         spin_unlock(&block_group->lock);
3038
3039         return ret;
3040 }
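
/*
 * Worked example of the cache sizing above, assuming 4K pages: a 50GB
 * block group gives num_pages = 50, then 50 * 16 pages * 4096 bytes =
 * 3,276,800 bytes, so roughly 3MB is preallocated for that group's
 * free space cache.  Any block group under 1GB still gets the one-page
 * minimum, i.e. 16 * 4096 = 64KB.
 */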
3041
3042 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3043                                    struct btrfs_root *root)
3044 {
3045         struct btrfs_block_group_cache *cache;
3046         int err = 0;
3047         struct btrfs_path *path;
3048         u64 last = 0;
3049
3050         path = btrfs_alloc_path();
3051         if (!path)
3052                 return -ENOMEM;
3053
3054 again:
3055         while (1) {
3056                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3057                 while (cache) {
3058                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3059                                 break;
3060                         cache = next_block_group(root, cache);
3061                 }
3062                 if (!cache) {
3063                         if (last == 0)
3064                                 break;
3065                         last = 0;
3066                         continue;
3067                 }
3068                 err = cache_save_setup(cache, trans, path);
3069                 last = cache->key.objectid + cache->key.offset;
3070                 btrfs_put_block_group(cache);
3071         }
3072
3073         while (1) {
3074                 if (last == 0) {
3075                         err = btrfs_run_delayed_refs(trans, root,
3076                                                      (unsigned long)-1);
3077                         if (err) /* File system offline */
3078                                 goto out;
3079                 }
3080
3081                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3082                 while (cache) {
3083                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3084                                 btrfs_put_block_group(cache);
3085                                 goto again;
3086                         }
3087
3088                         if (cache->dirty)
3089                                 break;
3090                         cache = next_block_group(root, cache);
3091                 }
3092                 if (!cache) {
3093                         if (last == 0)
3094                                 break;
3095                         last = 0;
3096                         continue;
3097                 }
3098
3099                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3100                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3101                 cache->dirty = 0;
3102                 last = cache->key.objectid + cache->key.offset;
3103
3104                 err = write_one_cache_group(trans, root, path, cache);
3105                 if (err) /* File system offline */
3106                         goto out;
3107
3108                 btrfs_put_block_group(cache);
3109         }
3110
3111         while (1) {
3112                 /*
3113                  * I don't think this is needed since we're just marking our
3114                  * preallocated extent as written, but just in case, it can't
3115                  * hurt.
3116                  */
3117                 if (last == 0) {
3118                         err = btrfs_run_delayed_refs(trans, root,
3119                                                      (unsigned long)-1);
3120                         if (err) /* File system offline */
3121                                 goto out;
3122                 }
3123
3124                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3125                 while (cache) {
3126                         /*
3127                          * Really this shouldn't happen, but it could if we
3128                          * couldn't write the entire preallocated extent and
3129                          * splitting the extent resulted in a new block.
3130                          */
3131                         if (cache->dirty) {
3132                                 btrfs_put_block_group(cache);
3133                                 goto again;
3134                         }
3135                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3136                                 break;
3137                         cache = next_block_group(root, cache);
3138                 }
3139                 if (!cache) {
3140                         if (last == 0)
3141                                 break;
3142                         last = 0;
3143                         continue;
3144                 }
3145
3146                 err = btrfs_write_out_cache(root, trans, cache, path);
3147
3148                 /*
3149                  * If we didn't have an error then the cache state is still
3150                  * NEED_WRITE, so we can set it to WRITTEN.
3151                  */
3152                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3153                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3154                 last = cache->key.objectid + cache->key.offset;
3155                 btrfs_put_block_group(cache);
3156         }
3157 out:
3158
3159         btrfs_free_path(path);
3160         return err;
3161 }
3162
3163 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3164 {
3165         struct btrfs_block_group_cache *block_group;
3166         int readonly = 0;
3167
3168         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3169         if (!block_group || block_group->ro)
3170                 readonly = 1;
3171         if (block_group)
3172                 btrfs_put_block_group(block_group);
3173         return readonly;
3174 }
3175
3176 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3177                              u64 total_bytes, u64 bytes_used,
3178                              struct btrfs_space_info **space_info)
3179 {
3180         struct btrfs_space_info *found;
3181         int i;
3182         int factor;
3183
3184         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3185                      BTRFS_BLOCK_GROUP_RAID10))
3186                 factor = 2;
3187         else
3188                 factor = 1;
3189
3190         found = __find_space_info(info, flags);
3191         if (found) {
3192                 spin_lock(&found->lock);
3193                 found->total_bytes += total_bytes;
3194                 found->disk_total += total_bytes * factor;
3195                 found->bytes_used += bytes_used;
3196                 found->disk_used += bytes_used * factor;
3197                 found->full = 0;
3198                 spin_unlock(&found->lock);
3199                 *space_info = found;
3200                 return 0;
3201         }
3202         found = kzalloc(sizeof(*found), GFP_NOFS);
3203         if (!found)
3204                 return -ENOMEM;
3205
3206         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3207                 INIT_LIST_HEAD(&found->block_groups[i]);
3208         init_rwsem(&found->groups_sem);
3209         spin_lock_init(&found->lock);
3210         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3211         found->total_bytes = total_bytes;
3212         found->disk_total = total_bytes * factor;
3213         found->bytes_used = bytes_used;
3214         found->disk_used = bytes_used * factor;
3215         found->bytes_pinned = 0;
3216         found->bytes_reserved = 0;
3217         found->bytes_readonly = 0;
3218         found->bytes_may_use = 0;
3219         found->full = 0;
3220         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3221         found->chunk_alloc = 0;
3222         found->flush = 0;
3223         init_waitqueue_head(&found->wait);
3224         *space_info = found;
3225         list_add_rcu(&found->list, &info->space_info);
3226         return 0;
3227 }
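
/*
 * Example of the factor above: adding a 1GB RAID1 block group with
 * 256MB used bumps total_bytes by 1GB and bytes_used by 256MB, but
 * disk_total by 2GB and disk_used by 512MB, since every byte in a
 * mirrored (DUP/RAID1/RAID10) profile occupies two bytes of raw disk.
 */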
3228
3229 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3230 {
3231         u64 extra_flags = chunk_to_extended(flags) &
3232                                 BTRFS_EXTENDED_PROFILE_MASK;
3233
3234         if (flags & BTRFS_BLOCK_GROUP_DATA)
3235                 fs_info->avail_data_alloc_bits |= extra_flags;
3236         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3237                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3238         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3239                 fs_info->avail_system_alloc_bits |= extra_flags;
3240 }
3241
3242 /*
3243  * returns target flags in extended format or 0 if restripe for this
3244  * chunk_type is not in progress
3245  *
3246  * should be called with either volume_mutex or balance_lock held
3247  */
3248 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3249 {
3250         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3251         u64 target = 0;
3252
3253         if (!bctl)
3254                 return 0;
3255
3256         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3257             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3258                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3259         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3260                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3261                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3262         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3263                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3264                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3265         }
3266
3267         return target;
3268 }
3269
3270 /*
3271  * @flags: available profiles in extended format (see ctree.h)
3272  *
3273  * Returns reduced profile in chunk format.  If profile changing is in
3274  * progress (either running or paused) picks the target profile (if it's
3275  * already available), otherwise falls back to plain reducing.
3276  */
3277 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3278 {
3279         /*
3280          * we add in the count of missing devices because we want
3281          * to make sure that any RAID levels on a degraded FS
3282          * continue to be honored.
3283          */
3284         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3285                 root->fs_info->fs_devices->missing_devices;
3286         u64 target;
3287
3288         /*
3289          * see if restripe for this chunk_type is in progress, if so
3290          * try to reduce to the target profile
3291          */
3292         spin_lock(&root->fs_info->balance_lock);
3293         target = get_restripe_target(root->fs_info, flags);
3294         if (target) {
3295                 /* pick target profile only if it's already available */
3296                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3297                         spin_unlock(&root->fs_info->balance_lock);
3298                         return extended_to_chunk(target);
3299                 }
3300         }
3301         spin_unlock(&root->fs_info->balance_lock);
3302
3303         if (num_devices == 1)
3304                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3305         if (num_devices < 4)
3306                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3307
3308         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3309             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3310                       BTRFS_BLOCK_GROUP_RAID10))) {
3311                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3312         }
3313
3314         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3315             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3316                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3317         }
3318
3319         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3320             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3321              (flags & BTRFS_BLOCK_GROUP_RAID10) |
3322              (flags & BTRFS_BLOCK_GROUP_DUP))) {
3323                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3324         }
3325
3326         return extended_to_chunk(flags);
3327 }
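
/*
 * Worked example of the reduction above: with two rw devices and
 * flags = DATA | RAID0 | RAID1 | RAID10 (no restripe target), the
 * num_devices < 4 check strips RAID10, then RAID1 wins over RAID0,
 * leaving DATA | RAID1 in chunk format.  On a single device the same
 * input reduces all the way down to plain DATA (the single profile).
 */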
3328
3329 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3330 {
3331         if (flags & BTRFS_BLOCK_GROUP_DATA)
3332                 flags |= root->fs_info->avail_data_alloc_bits;
3333         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3334                 flags |= root->fs_info->avail_system_alloc_bits;
3335         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3336                 flags |= root->fs_info->avail_metadata_alloc_bits;
3337
3338         return btrfs_reduce_alloc_profile(root, flags);
3339 }
3340
3341 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3342 {
3343         u64 flags;
3344
3345         if (data)
3346                 flags = BTRFS_BLOCK_GROUP_DATA;
3347         else if (root == root->fs_info->chunk_root)
3348                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3349         else
3350                 flags = BTRFS_BLOCK_GROUP_METADATA;
3351
3352         return get_alloc_profile(root, flags);
3353 }
3354
3355 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3356 {
3357         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3358                                                        BTRFS_BLOCK_GROUP_DATA);
3359 }
3360
3361 /*
3362  * This will check the space info that the inode allocates from to make sure
3363  * there is enough space for @bytes.
3364  */
3365 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3366 {
3367         struct btrfs_space_info *data_sinfo;
3368         struct btrfs_root *root = BTRFS_I(inode)->root;
3369         u64 used;
3370         int ret = 0, committed = 0, alloc_chunk = 1;
3371
3372         /* make sure bytes are sectorsize aligned */
3373         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3374
3375         if (root == root->fs_info->tree_root ||
3376             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3377                 alloc_chunk = 0;
3378                 committed = 1;
3379         }
3380
3381         data_sinfo = BTRFS_I(inode)->space_info;
3382         if (!data_sinfo)
3383                 goto alloc;
3384
3385 again:
3386         /* make sure we have enough space to handle the data first */
3387         spin_lock(&data_sinfo->lock);
3388         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3389                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3390                 data_sinfo->bytes_may_use;
3391
3392         if (used + bytes > data_sinfo->total_bytes) {
3393                 struct btrfs_trans_handle *trans;
3394
3395                 /*
3396                  * if we don't have enough free bytes in this space then we need
3397                  * to alloc a new chunk.
3398                  */
3399                 if (!data_sinfo->full && alloc_chunk) {
3400                         u64 alloc_target;
3401
3402                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3403                         spin_unlock(&data_sinfo->lock);
3404 alloc:
3405                         alloc_target = btrfs_get_alloc_profile(root, 1);
3406                         trans = btrfs_join_transaction(root);
3407                         if (IS_ERR(trans))
3408                                 return PTR_ERR(trans);
3409
3410                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3411                                              bytes + 2 * 1024 * 1024,
3412                                              alloc_target,
3413                                              CHUNK_ALLOC_NO_FORCE);
3414                         btrfs_end_transaction(trans, root);
3415                         if (ret < 0) {
3416                                 if (ret != -ENOSPC)
3417                                         return ret;
3418                                 else
3419                                         goto commit_trans;
3420                         }
3421
3422                         if (!data_sinfo) {
3423                                 btrfs_set_inode_space_info(root, inode);
3424                                 data_sinfo = BTRFS_I(inode)->space_info;
3425                         }
3426                         goto again;
3427                 }
3428
3429                 /*
3430                  * If we have less pinned bytes than we want to allocate then
3431                  * don't bother committing the transaction, it won't help us.
3432                  */
3433                 if (data_sinfo->bytes_pinned < bytes)
3434                         committed = 1;
3435                 spin_unlock(&data_sinfo->lock);
3436
3437                 /* commit the current transaction and try again */
3438 commit_trans:
3439                 if (!committed &&
3440                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3441                         committed = 1;
3442                         trans = btrfs_join_transaction(root);
3443                         if (IS_ERR(trans))
3444                                 return PTR_ERR(trans);
3445                         ret = btrfs_commit_transaction(trans, root);
3446                         if (ret)
3447                                 return ret;
3448                         goto again;
3449                 }
3450
3451                 return -ENOSPC;
3452         }
3453         data_sinfo->bytes_may_use += bytes;
3454         trace_btrfs_space_reservation(root->fs_info, "space_info",
3455                                       data_sinfo->flags, bytes, 1);
3456         spin_unlock(&data_sinfo->lock);
3457
3458         return 0;
3459 }
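
/*
 * Rounding example for the alignment above: with a 4096 byte
 * sectorsize, a request for 5000 bytes becomes
 * (5000 + 4095) & ~4095 = 8192.  btrfs_free_reserved_data_space()
 * below applies the same rounding, so reserve and release always
 * account identical byte counts.
 */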
3460
3461 /*
3462  * Called if we need to clear a data reservation for this inode.
3463  */
3464 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3465 {
3466         struct btrfs_root *root = BTRFS_I(inode)->root;
3467         struct btrfs_space_info *data_sinfo;
3468
3469         /* make sure bytes are sectorsize aligned */
3470         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3471
3472         data_sinfo = BTRFS_I(inode)->space_info;
3473         spin_lock(&data_sinfo->lock);
3474         data_sinfo->bytes_may_use -= bytes;
3475         trace_btrfs_space_reservation(root->fs_info, "space_info",
3476                                       data_sinfo->flags, bytes, 0);
3477         spin_unlock(&data_sinfo->lock);
3478 }
3479
3480 static void force_metadata_allocation(struct btrfs_fs_info *info)
3481 {
3482         struct list_head *head = &info->space_info;
3483         struct btrfs_space_info *found;
3484
3485         rcu_read_lock();
3486         list_for_each_entry_rcu(found, head, list) {
3487                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3488                         found->force_alloc = CHUNK_ALLOC_FORCE;
3489         }
3490         rcu_read_unlock();
3491 }
3492
3493 static int should_alloc_chunk(struct btrfs_root *root,
3494                               struct btrfs_space_info *sinfo, u64 alloc_bytes,
3495                               int force)
3496 {
3497         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3498         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3499         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3500         u64 thresh;
3501
3502         if (force == CHUNK_ALLOC_FORCE)
3503                 return 1;
3504
3505         /*
3506          * We need to take into account the global rsv because for all intents
3507          * and purposes it's used space.  Don't worry about locking the
3508          * global_rsv, it doesn't change except when the transaction commits.
3509          */
3510         num_allocated += global_rsv->size;
3511
3512         /*
3513          * in limited mode, we want to have some free space up to
3514          * about 1% of the FS size.
3515          */
3516         if (force == CHUNK_ALLOC_LIMITED) {
3517                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3518                 thresh = max_t(u64, 64 * 1024 * 1024,
3519                                div_factor_fine(thresh, 1));
3520
3521                 if (num_bytes - num_allocated < thresh)
3522                         return 1;
3523         }
3524         thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3525
3526         /* 256MB or 2% of the FS */
3527         thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
3528         /* system chunks need a much smaller threshold */
3529         if (sinfo->flags & BTRFS_BLOCK_GROUP_SYSTEM)
3530                 thresh = 32 * 1024 * 1024;
3531
3532         if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
3533                 return 0;
3534         return 1;
3535 }
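
/*
 * Worked example for the thresholds above, on a 1TB filesystem: in
 * CHUNK_ALLOC_LIMITED mode the target is max(64MB, 1% of 1TB) = ~10GB,
 * so a chunk is allowed while this space_info has less than ~10GB of
 * slack.  Otherwise the threshold is max(256MB, 2% of 1TB) = ~20GB:
 * once the space_info already spans more than that and is under 80%
 * used, a new chunk is refused.
 */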
3536
3537 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3538 {
3539         u64 num_dev;
3540
3541         if (type & BTRFS_BLOCK_GROUP_RAID10 ||
3542             type & BTRFS_BLOCK_GROUP_RAID0)
3543                 num_dev = root->fs_info->fs_devices->rw_devices;
3544         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3545                 num_dev = 2;
3546         else
3547                 num_dev = 1;    /* DUP or single */
3548
3549         /* metadata for updating the devices and the chunk tree */
3550         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3551 }
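
/*
 * Example: for a RAID1 chunk num_dev is 2, so the threshold covers the
 * metadata needed to update three items in all: one device item per
 * mirror plus the chunk tree item itself.  RAID0/RAID10 scale with the
 * number of rw devices instead, since every device item may be
 * touched by the allocation.
 */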
3552
3553 static void check_system_chunk(struct btrfs_trans_handle *trans,
3554                                struct btrfs_root *root, u64 type)
3555 {
3556         struct btrfs_space_info *info;
3557         u64 left;
3558         u64 thresh;
3559
3560         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3561         spin_lock(&info->lock);
3562         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3563                 info->bytes_reserved - info->bytes_readonly;
3564         spin_unlock(&info->lock);
3565
3566         thresh = get_system_chunk_thresh(root, type);
3567         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3568                 printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3569                        left, thresh, type);
3570                 dump_space_info(info, 0, 0);
3571         }
3572
3573         if (left < thresh) {
3574                 u64 flags;
3575
3576                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3577                 btrfs_alloc_chunk(trans, root, flags);
3578         }
3579 }
3580
3581 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3582                           struct btrfs_root *extent_root, u64 alloc_bytes,
3583                           u64 flags, int force)
3584 {
3585         struct btrfs_space_info *space_info;
3586         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3587         int wait_for_alloc = 0;
3588         int ret = 0;
3589
3590         space_info = __find_space_info(extent_root->fs_info, flags);
3591         if (!space_info) {
3592                 ret = update_space_info(extent_root->fs_info, flags,
3593                                         0, 0, &space_info);
3594                 BUG_ON(ret); /* -ENOMEM */
3595         }
3596         BUG_ON(!space_info); /* Logic error */
3597
3598 again:
3599         spin_lock(&space_info->lock);
3600         if (force < space_info->force_alloc)
3601                 force = space_info->force_alloc;
3602         if (space_info->full) {
3603                 spin_unlock(&space_info->lock);
3604                 return 0;
3605         }
3606
3607         if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3608                 spin_unlock(&space_info->lock);
3609                 return 0;
3610         } else if (space_info->chunk_alloc) {
3611                 wait_for_alloc = 1;
3612         } else {
3613                 space_info->chunk_alloc = 1;
3614         }
3615
3616         spin_unlock(&space_info->lock);
3617
3618         mutex_lock(&fs_info->chunk_mutex);
3619
3620         /*
3621          * The chunk_mutex is held throughout the entirety of a chunk
3622          * allocation, so once we've acquired the chunk_mutex we know that the
3623          * other guy is done and we need to recheck and see if we should
3624          * allocate.
3625          */
3626         if (wait_for_alloc) {
3627                 mutex_unlock(&fs_info->chunk_mutex);
3628                 wait_for_alloc = 0;
3629                 goto again;
3630         }
3631
3632         /*
3633          * If we have mixed data/metadata chunks we want to make sure we keep
3634          * allocating mixed chunks instead of individual chunks.
3635          */
3636         if (btrfs_mixed_space_info(space_info))
3637                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3638
3639         /*
3640          * if we're doing a data chunk, go ahead and make sure that
3641          * we keep a reasonable number of metadata chunks allocated in the
3642          * FS as well.
3643          */
3644         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3645                 fs_info->data_chunk_allocations++;
3646                 if (!(fs_info->data_chunk_allocations %
3647                       fs_info->metadata_ratio))
3648                         force_metadata_allocation(fs_info);
3649         }
3650
3651         /*
3652          * Check if we have enough space in SYSTEM chunk because we may need
3653          * to update devices.
3654          */
3655         check_system_chunk(trans, extent_root, flags);
3656
3657         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3658         if (ret < 0 && ret != -ENOSPC)
3659                 goto out;
3660
3661         spin_lock(&space_info->lock);
3662         if (ret)
3663                 space_info->full = 1;
3664         else
3665                 ret = 1;
3666
3667         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3668         space_info->chunk_alloc = 0;
3669         spin_unlock(&space_info->lock);
3670 out:
3671         mutex_unlock(&fs_info->chunk_mutex);
3672         return ret;
3673 }
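
/*
 * Return convention sketch for do_chunk_alloc(): 0 means no chunk was
 * needed (or another allocator raced us and finished first), 1 means a
 * chunk was allocated, and -ENOSPC marks the space_info full and is
 * passed back so callers such as btrfs_check_data_free_space() can
 * fall back to committing the transaction rather than failing
 * immediately.  Any other negative value is a hard error.
 */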
3674
3675 /*
3676  * shrink metadata reservation for delalloc
3677  */
3678 static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
3679                            bool wait_ordered)
3680 {
3681         struct btrfs_block_rsv *block_rsv;
3682         struct btrfs_space_info *space_info;
3683         struct btrfs_trans_handle *trans;
3684         u64 reserved;
3685         u64 max_reclaim;
3686         u64 reclaimed = 0;
3687         long time_left;
3688         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3689         int loops = 0;
3690         unsigned long progress;
3691
3692         trans = (struct btrfs_trans_handle *)current->journal_info;
3693         block_rsv = &root->fs_info->delalloc_block_rsv;
3694         space_info = block_rsv->space_info;
3695
3696         smp_mb();
3697         reserved = space_info->bytes_may_use;
3698         progress = space_info->reservation_progress;
3699
3700         if (reserved == 0)
3701                 return 0;
3702
3703         smp_mb();
3704         if (root->fs_info->delalloc_bytes == 0) {
3705                 if (trans)
3706                         return 0;
3707                 btrfs_wait_ordered_extents(root, 0, 0);
3708                 return 0;
3709         }
3710
3711         max_reclaim = min(reserved, to_reclaim);
3712         nr_pages = max_t(unsigned long, nr_pages,
3713                          max_reclaim >> PAGE_CACHE_SHIFT);
3714         while (loops < 1024) {
3715                 /* have the flusher threads jump in and do some IO */
3716                 smp_mb();
3717                 nr_pages = min_t(unsigned long, nr_pages,
3718                        root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3719                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
3720                                                 WB_REASON_FS_FREE_SPACE);
3721
3722                 spin_lock(&space_info->lock);
3723                 if (reserved > space_info->bytes_may_use)
3724                         reclaimed += reserved - space_info->bytes_may_use;
3725                 reserved = space_info->bytes_may_use;
3726                 spin_unlock(&space_info->lock);
3727
3728                 loops++;
3729
3730                 if (reserved == 0 || reclaimed >= max_reclaim)
3731                         break;
3732
3733                 if (trans && trans->transaction->blocked)
3734                         return -EAGAIN;
3735
3736                 if (wait_ordered && !trans) {
3737                         btrfs_wait_ordered_extents(root, 0, 0);
3738                 } else {
3739                         time_left = schedule_timeout_interruptible(1);
3740
3741                         /* We were interrupted, exit */
3742                         if (time_left)
3743                                 break;
3744                 }
3745
3746                 /* We've kicked the IO a few times; if anything has been freed,
3747                  * exit.  There is no sense in looping here for a long time
3748                  * when we really need to commit the transaction, or when there
3749                  * are just too many writers without enough free space.
3750                  */
3751
3752                 if (loops > 3) {
3753                         smp_mb();
3754                         if (progress != space_info->reservation_progress)
3755                                 break;
3756                 }
3757
3758         }
3759
3760         return reclaimed >= to_reclaim;
3761 }
3762
3763 /**
3764  * may_commit_transaction - possibly commit the transaction if it's ok to
3765  * @root - the root we're allocating for
3766  * @bytes - the number of bytes we want to reserve
3767  * @force - force the commit
3768  *
3769  * This will check to make sure that committing the transaction will actually
3770  * get us somewhere and then commit the transaction if it does.  Otherwise it
3771  * will return -ENOSPC.
3772  */
3773 static int may_commit_transaction(struct btrfs_root *root,
3774                                   struct btrfs_space_info *space_info,
3775                                   u64 bytes, int force)
3776 {
3777         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3778         struct btrfs_trans_handle *trans;
3779
3780         trans = (struct btrfs_trans_handle *)current->journal_info;
3781         if (trans)
3782                 return -EAGAIN;
3783
3784         if (force)
3785                 goto commit;
3786
3787         /* See if there is enough pinned space to make this reservation */
3788         spin_lock(&space_info->lock);
3789         if (space_info->bytes_pinned >= bytes) {
3790                 spin_unlock(&space_info->lock);
3791                 goto commit;
3792         }
3793         spin_unlock(&space_info->lock);
3794
3795         /*
3796          * See if there is some space in the delayed insertion reservation for
3797          * this reservation.
3798          */
3799         if (space_info != delayed_rsv->space_info)
3800                 return -ENOSPC;
3801
3802         spin_lock(&space_info->lock);
3803         spin_lock(&delayed_rsv->lock);
3804         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
3805                 spin_unlock(&delayed_rsv->lock);
3806                 spin_unlock(&space_info->lock);
3807                 return -ENOSPC;
3808         }
3809         spin_unlock(&delayed_rsv->lock);
3810         spin_unlock(&space_info->lock);
3811
3812 commit:
3813         trans = btrfs_join_transaction(root);
3814         if (IS_ERR(trans))
3815                 return -ENOSPC;
3816
3817         return btrfs_commit_transaction(trans, root);
3818 }
3819
3820 /**
3821  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3822  * @root - the root we're allocating for
3823  * @block_rsv - the block_rsv we're allocating for
3824  * @orig_bytes - the number of bytes we want
3825  * @flush - whether or not we can flush to make our reservation
3826  *
3827  * This will reserve orig_bytes number of bytes from the space info associated
3828  * with the block_rsv.  If there is not enough space it will make an attempt to
3829  * flush out space to make room.  It will do this by flushing delalloc if
3830  * possible or committing the transaction.  If flush is 0 then no attempts to
3831  * regain reservations will be made and this will fail if there is not enough
3832  * space already.
3833  */
3834 static int reserve_metadata_bytes(struct btrfs_root *root,
3835                                   struct btrfs_block_rsv *block_rsv,
3836                                   u64 orig_bytes, int flush)
3837 {
3838         struct btrfs_space_info *space_info = block_rsv->space_info;
3839         u64 used;
3840         u64 num_bytes = orig_bytes;
3841         int retries = 0;
3842         int ret = 0;
3843         bool committed = false;
3844         bool flushing = false;
3845         bool wait_ordered = false;
3846
3847 again:
3848         ret = 0;
3849         spin_lock(&space_info->lock);
3850         /*
3851          * We only want to wait if somebody other than us is flushing and we are
3852          * actually allowed to flush.
3853          */
3854         while (flush && !flushing && space_info->flush) {
3855                 spin_unlock(&space_info->lock);
3856                 /*
3857                  * If we have a trans handle we can't wait because the flusher
3858                  * may have to commit the transaction, which would mean we would
3859                  * deadlock since we are waiting for the flusher to finish, but
3860                  * hold the current transaction open.
3861                  */
3862                 if (current->journal_info)
3863                         return -EAGAIN;
3864                 ret = wait_event_killable(space_info->wait, !space_info->flush);
3865                 /* Must have been killed, return */
3866                 if (ret)
3867                         return -EINTR;
3868
3869                 spin_lock(&space_info->lock);
3870         }
3871
3872         ret = -ENOSPC;
3873         used = space_info->bytes_used + space_info->bytes_reserved +
3874                 space_info->bytes_pinned + space_info->bytes_readonly +
3875                 space_info->bytes_may_use;
3876
3877         /*
3878          * The idea here is that if we've not already over-reserved the block
3879          * group, we can go ahead and save our reservation first and then start
3880          * flushing if we need to.  Otherwise, if we've already overcommitted,
3881          * let's start flushing stuff first and then come back and try to make
3882          * our reservation.
3883          */
3884         if (used <= space_info->total_bytes) {
3885                 if (used + orig_bytes <= space_info->total_bytes) {
3886                         space_info->bytes_may_use += orig_bytes;
3887                         trace_btrfs_space_reservation(root->fs_info,
3888                                 "space_info", space_info->flags, orig_bytes, 1);
3889                         ret = 0;
3890                 } else {
3891                         /*
3892                          * Ok, set num_bytes to orig_bytes since we aren't
3893                          * overcommitted; this way we only try to reclaim what
3894                          * we need.
3895                          */
3896                         num_bytes = orig_bytes;
3897                 }
3898         } else {
3899                 /*
3900                  * Ok, we're overcommitted; set num_bytes to the overcommitted
3901                  * amount plus the amount of bytes that we need for this
3902                  * reservation.
3903                  */
3904                 wait_ordered = true;
3905                 num_bytes = used - space_info->total_bytes +
3906                         (orig_bytes * (retries + 1));
3907         }
3908
3909         if (ret) {
3910                 u64 profile = btrfs_get_alloc_profile(root, 0);
3911                 u64 avail;
3912
3913                 /*
3914                  * If we have a lot of space that's pinned, don't bother doing
3915                  * the overcommit dance yet and just commit the transaction.
3916                  */
3917                 avail = (space_info->total_bytes - space_info->bytes_used) * 8;
3918                 do_div(avail, 10);
3919                 if (space_info->bytes_pinned >= avail && flush && !committed) {
3920                         space_info->flush = 1;
3921                         flushing = true;
3922                         spin_unlock(&space_info->lock);
3923                         ret = may_commit_transaction(root, space_info,
3924                                                      orig_bytes, 1);
3925                         if (ret)
3926                                 goto out;
3927                         committed = true;
3928                         goto again;
3929                 }
3930
3931                 spin_lock(&root->fs_info->free_chunk_lock);
3932                 avail = root->fs_info->free_chunk_space;
3933
3934                 /*
3935                  * If we have dup, raid1 or raid10, then only half of the free
3936                  * space is actually usable.
3937                  */
3938                 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3939                                BTRFS_BLOCK_GROUP_RAID1 |
3940                                BTRFS_BLOCK_GROUP_RAID10))
3941                         avail >>= 1;
3942
3943                 /*
3944                  * If we can flush, only let us overcommit by up to 1/8th of
3945                  * the space.  If we can't flush, allow overcommitting up to
3946                  * 1/2 of the space.
3947                  */
3948                 if (flush)
3949                         avail >>= 3;
3950                 else
3951                         avail >>= 1;
3952                 spin_unlock(&root->fs_info->free_chunk_lock);
3953
3954                 if (used + num_bytes < space_info->total_bytes + avail) {
3955                         space_info->bytes_may_use += orig_bytes;
3956                         trace_btrfs_space_reservation(root->fs_info,
3957                                 "space_info", space_info->flags, orig_bytes, 1);
3958                         ret = 0;
3959                 } else {
3960                         wait_ordered = true;
3961                 }
3962         }
3963
3964         /*
3965          * Couldn't make our reservation, save our place so while we're trying
3966          * to reclaim space we can actually use it instead of somebody else
3967          * stealing it from us.
3968          */
3969         if (ret && flush) {
3970                 flushing = true;
3971                 space_info->flush = 1;
3972         }
3973
3974         spin_unlock(&space_info->lock);
3975
3976         if (!ret || !flush)
3977                 goto out;
3978
3979         /*
3980          * We do synchronous shrinking since we don't actually unreserve
3981          * metadata until after the IO is completed.
3982          */
3983         ret = shrink_delalloc(root, num_bytes, wait_ordered);
3984         if (ret < 0)
3985                 goto out;
3986
3987         ret = 0;
3988
3989         /*
3990          * So if we were overcommitted it's possible that somebody else flushed
3991          * out enough space and we simply didn't have enough space to reclaim,
3992          * so go back around and try again.
3993          */
3994         if (retries < 2) {
3995                 wait_ordered = true;
3996                 retries++;
3997                 goto again;
3998         }
3999
4000         ret = -ENOSPC;
4001         if (committed)
4002                 goto out;
4003
4004         ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4005         if (!ret) {
4006                 committed = true;
4007                 goto again;
4008         }
4009
4010 out:
4011         if (flushing) {
4012                 spin_lock(&space_info->lock);
4013                 space_info->flush = 0;
4014                 wake_up_all(&space_info->wait);
4015                 spin_unlock(&space_info->lock);
4016         }
4017         return ret;
4018 }
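
/*
 * Overcommit example for the avail math above: with 100GB of free
 * chunk space and a RAID1 metadata profile, avail is first halved to
 * 50GB (mirrored writes), then cut to 1/8th (6.25GB) when flushing is
 * allowed or to 1/2 (25GB) when it is not, and the reservation only
 * succeeds while used + num_bytes stays under total_bytes plus that
 * slack.
 */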
4019
4020 static struct btrfs_block_rsv *get_block_rsv(
4021                                         const struct btrfs_trans_handle *trans,
4022                                         const struct btrfs_root *root)
4023 {
4024         struct btrfs_block_rsv *block_rsv = NULL;
4025
4026         if (root->ref_cows || root == root->fs_info->csum_root)
4027                 block_rsv = trans->block_rsv;
4028
4029         if (!block_rsv)
4030                 block_rsv = root->block_rsv;
4031
4032         if (!block_rsv)
4033                 block_rsv = &root->fs_info->empty_block_rsv;
4034
4035         return block_rsv;
4036 }
4037
4038 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4039                                u64 num_bytes)
4040 {
4041         int ret = -ENOSPC;
4042         spin_lock(&block_rsv->lock);
4043         if (block_rsv->reserved >= num_bytes) {
4044                 block_rsv->reserved -= num_bytes;
4045                 if (block_rsv->reserved < block_rsv->size)
4046                         block_rsv->full = 0;
4047                 ret = 0;
4048         }
4049         spin_unlock(&block_rsv->lock);
4050         return ret;
4051 }
4052
4053 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4054                                 u64 num_bytes, int update_size)
4055 {
4056         spin_lock(&block_rsv->lock);
4057         block_rsv->reserved += num_bytes;
4058         if (update_size)
4059                 block_rsv->size += num_bytes;
4060         else if (block_rsv->reserved >= block_rsv->size)
4061                 block_rsv->full = 1;
4062         spin_unlock(&block_rsv->lock);
4063 }
4064
4065 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4066                                     struct btrfs_block_rsv *block_rsv,
4067                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4068 {
4069         struct btrfs_space_info *space_info = block_rsv->space_info;
4070
4071         spin_lock(&block_rsv->lock);
4072         if (num_bytes == (u64)-1)
4073                 num_bytes = block_rsv->size;
4074         block_rsv->size -= num_bytes;
4075         if (block_rsv->reserved >= block_rsv->size) {
4076                 num_bytes = block_rsv->reserved - block_rsv->size;
4077                 block_rsv->reserved = block_rsv->size;
4078                 block_rsv->full = 1;
4079         } else {
4080                 num_bytes = 0;
4081         }
4082         spin_unlock(&block_rsv->lock);
4083
4084         if (num_bytes > 0) {
4085                 if (dest) {
4086                         spin_lock(&dest->lock);
4087                         if (!dest->full) {
4088                                 u64 bytes_to_add;
4089
4090                                 bytes_to_add = dest->size - dest->reserved;
4091                                 bytes_to_add = min(num_bytes, bytes_to_add);
4092                                 dest->reserved += bytes_to_add;
4093                                 if (dest->reserved >= dest->size)
4094                                         dest->full = 1;
4095                                 num_bytes -= bytes_to_add;
4096                         }
4097                         spin_unlock(&dest->lock);
4098                 }
4099                 if (num_bytes) {
4100                         spin_lock(&space_info->lock);
4101                         space_info->bytes_may_use -= num_bytes;
4102                         trace_btrfs_space_reservation(fs_info, "space_info",
4103                                         space_info->flags, num_bytes, 0);
4104                         space_info->reservation_progress++;
4105                         spin_unlock(&space_info->lock);
4106                 }
4107         }
4108 }
4109
4110 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4111                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4112 {
4113         int ret;
4114
4115         ret = block_rsv_use_bytes(src, num_bytes);
4116         if (ret)
4117                 return ret;
4118
4119         block_rsv_add_bytes(dst, num_bytes, 1);
4120         return 0;
4121 }
4122
4123 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
4124 {
4125         memset(rsv, 0, sizeof(*rsv));
4126         spin_lock_init(&rsv->lock);
4127 }
4128
4129 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
4130 {
4131         struct btrfs_block_rsv *block_rsv;
4132         struct btrfs_fs_info *fs_info = root->fs_info;
4133
4134         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4135         if (!block_rsv)
4136                 return NULL;
4137
4138         btrfs_init_block_rsv(block_rsv);
4139         block_rsv->space_info = __find_space_info(fs_info,
4140                                                   BTRFS_BLOCK_GROUP_METADATA);
4141         return block_rsv;
4142 }
4143
4144 void btrfs_free_block_rsv(struct btrfs_root *root,
4145                           struct btrfs_block_rsv *rsv)
4146 {
4147         btrfs_block_rsv_release(root, rsv, (u64)-1);
4148         kfree(rsv);
4149 }
4150
4151 static inline int __block_rsv_add(struct btrfs_root *root,
4152                                   struct btrfs_block_rsv *block_rsv,
4153                                   u64 num_bytes, int flush)
4154 {
4155         int ret;
4156
4157         if (num_bytes == 0)
4158                 return 0;
4159
4160         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4161         if (!ret) {
4162                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4163                 return 0;
4164         }
4165
4166         return ret;
4167 }
4168
4169 int btrfs_block_rsv_add(struct btrfs_root *root,
4170                         struct btrfs_block_rsv *block_rsv,
4171                         u64 num_bytes)
4172 {
4173         return __block_rsv_add(root, block_rsv, num_bytes, 1);
4174 }
4175
4176 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
4177                                 struct btrfs_block_rsv *block_rsv,
4178                                 u64 num_bytes)
4179 {
4180         return __block_rsv_add(root, block_rsv, num_bytes, 0);
4181 }
4182
4183 int btrfs_block_rsv_check(struct btrfs_root *root,
4184                           struct btrfs_block_rsv *block_rsv, int min_factor)
4185 {
4186         u64 num_bytes = 0;
4187         int ret = -ENOSPC;
4188
4189         if (!block_rsv)
4190                 return 0;
4191
4192         spin_lock(&block_rsv->lock);
4193         num_bytes = div_factor(block_rsv->size, min_factor);
4194         if (block_rsv->reserved >= num_bytes)
4195                 ret = 0;
4196         spin_unlock(&block_rsv->lock);
4197
4198         return ret;
4199 }
4200
4201 static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
4202                                            struct btrfs_block_rsv *block_rsv,
4203                                            u64 min_reserved, int flush)
4204 {
4205         u64 num_bytes = 0;
4206         int ret = -ENOSPC;
4207
4208         if (!block_rsv)
4209                 return 0;
4210
4211         spin_lock(&block_rsv->lock);
4212         num_bytes = min_reserved;
4213         if (block_rsv->reserved >= num_bytes)
4214                 ret = 0;
4215         else
4216                 num_bytes -= block_rsv->reserved;
4217         spin_unlock(&block_rsv->lock);
4218
4219         if (!ret)
4220                 return 0;
4221
4222         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4223         if (!ret) {
4224                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4225                 return 0;
4226         }
4227
4228         return ret;
4229 }
4230
4231 int btrfs_block_rsv_refill(struct btrfs_root *root,
4232                            struct btrfs_block_rsv *block_rsv,
4233                            u64 min_reserved)
4234 {
4235         return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
4236 }
4237
4238 int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
4239                                    struct btrfs_block_rsv *block_rsv,
4240                                    u64 min_reserved)
4241 {
4242         return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
4243 }
4244
4245 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4246                             struct btrfs_block_rsv *dst_rsv,
4247                             u64 num_bytes)
4248 {
4249         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4250 }
4251
4252 void btrfs_block_rsv_release(struct btrfs_root *root,
4253                              struct btrfs_block_rsv *block_rsv,
4254                              u64 num_bytes)
4255 {
4256         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4257         if (global_rsv->full || global_rsv == block_rsv ||
4258             block_rsv->space_info != global_rsv->space_info)
4259                 global_rsv = NULL;
4260         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4261                                 num_bytes);
4262 }
4263
4264 /*
4265  * helper to calculate size of global block reservation.
4266  * the desired value is the sum of the space used by the extent tree,
4267  * checksum tree and root tree
4268  */
4269 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4270 {
4271         struct btrfs_space_info *sinfo;
4272         u64 num_bytes;
4273         u64 meta_used;
4274         u64 data_used;
4275         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4276
4277         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4278         spin_lock(&sinfo->lock);
4279         data_used = sinfo->bytes_used;
4280         spin_unlock(&sinfo->lock);
4281
4282         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4283         spin_lock(&sinfo->lock);
4284         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4285                 data_used = 0;
4286         meta_used = sinfo->bytes_used;
4287         spin_unlock(&sinfo->lock);
4288
4289         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4290                     csum_size * 2;
4291         num_bytes += div64_u64(data_used + meta_used, 50);
4292
4293         if (num_bytes * 3 > meta_used)
4294                 num_bytes = div64_u64(meta_used, 3);
4295
4296         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4297 }
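
/*
 * Worked example (hypothetical numbers, assuming 4K blocks and 4-byte
 * crc32c checksums): with data_used = 1GiB and meta_used = 1GiB, the
 * csum term is (1GiB >> 12) * 4 * 2 = 2MiB and the 2% term is
 * div64_u64(2GiB, 50) ~= 41MiB, for roughly 43MiB total.  That times 3
 * is well below meta_used, so the meta_used / 3 clamp does not apply,
 * and the sum is then rounded up to a multiple of leafsize << 10.
 */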
4298
4299 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4300 {
4301         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4302         struct btrfs_space_info *sinfo = block_rsv->space_info;
4303         u64 num_bytes;
4304
4305         num_bytes = calc_global_metadata_size(fs_info);
4306
4307         spin_lock(&sinfo->lock);
4308         spin_lock(&block_rsv->lock);
4309
4310         block_rsv->size = num_bytes;
4311
4312         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4313                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4314                     sinfo->bytes_may_use;
4315
4316         if (sinfo->total_bytes > num_bytes) {
4317                 num_bytes = sinfo->total_bytes - num_bytes;
4318                 block_rsv->reserved += num_bytes;
4319                 sinfo->bytes_may_use += num_bytes;
4320                 trace_btrfs_space_reservation(fs_info, "space_info",
4321                                       sinfo->flags, num_bytes, 1);
4322         }
4323
4324         if (block_rsv->reserved >= block_rsv->size) {
4325                 num_bytes = block_rsv->reserved - block_rsv->size;
4326                 sinfo->bytes_may_use -= num_bytes;
4327                 trace_btrfs_space_reservation(fs_info, "space_info",
4328                                       sinfo->flags, num_bytes, 0);
4329                 sinfo->reservation_progress++;
4330                 block_rsv->reserved = block_rsv->size;
4331                 block_rsv->full = 1;
4332         }
4333
4334         spin_unlock(&block_rsv->lock);
4335         spin_unlock(&sinfo->lock);
4336 }
4337
4338 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4339 {
4340         struct btrfs_space_info *space_info;
4341
4342         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4343         fs_info->chunk_block_rsv.space_info = space_info;
4344
4345         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4346         fs_info->global_block_rsv.space_info = space_info;
4347         fs_info->delalloc_block_rsv.space_info = space_info;
4348         fs_info->trans_block_rsv.space_info = space_info;
4349         fs_info->empty_block_rsv.space_info = space_info;
4350         fs_info->delayed_block_rsv.space_info = space_info;
4351
4352         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4353         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4354         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4355         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4356         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4357
4358         update_global_block_rsv(fs_info);
4359 }
4360
4361 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4362 {
4363         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4364                                 (u64)-1);
4365         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4366         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4367         WARN_ON(fs_info->trans_block_rsv.size > 0);
4368         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4369         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4370         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4371         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4372         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4373 }
4374
4375 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4376                                   struct btrfs_root *root)
4377 {
4378         if (!trans->bytes_reserved)
4379                 return;
4380
4381         trace_btrfs_space_reservation(root->fs_info, "transaction",
4382                                       trans->transid, trans->bytes_reserved, 0);
4383         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4384         trans->bytes_reserved = 0;
4385 }
4386
4387 /* Can only return 0 or -ENOSPC */
4388 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4389                                   struct inode *inode)
4390 {
4391         struct btrfs_root *root = BTRFS_I(inode)->root;
4392         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4393         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4394
4395         /*
4396          * We need to hold space in order to delete our orphan item once we've
4397          * added it, so this takes the reservation up front and we release it
4398          * when we are truly done with the orphan item.
4399          */
4400         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4401         trace_btrfs_space_reservation(root->fs_info, "orphan",
4402                                       btrfs_ino(inode), num_bytes, 1);
4403         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4404 }
4405
4406 void btrfs_orphan_release_metadata(struct inode *inode)
4407 {
4408         struct btrfs_root *root = BTRFS_I(inode)->root;
4409         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4410         trace_btrfs_space_reservation(root->fs_info, "orphan",
4411                                       btrfs_ino(inode), num_bytes, 0);
4412         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4413 }
4414
4415 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4416                                 struct btrfs_pending_snapshot *pending)
4417 {
4418         struct btrfs_root *root = pending->root;
4419         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4420         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4421         /*
4422          * two for root back/forward refs, two for directory entries
4423          * and one for root of the snapshot.
4424          */
4425         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4426         dst_rsv->space_info = src_rsv->space_info;
4427         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4428 }
4429
4430 /**
4431  * drop_outstanding_extent - drop an outstanding extent
4432  * @inode: the inode we're dropping the extent for
4433  *
4434  * This is called when we are freeing up an outstanding extent, either
4435  * after an error or after an extent is written.  This will return the number of
4436  * reserved extents that need to be freed.  This must be called with
4437  * BTRFS_I(inode)->lock held.
4438  */
4439 static unsigned drop_outstanding_extent(struct inode *inode)
4440 {
4441         unsigned drop_inode_space = 0;
4442         unsigned dropped_extents = 0;
4443
4444         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4445         BTRFS_I(inode)->outstanding_extents--;
4446
4447         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4448             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4449                                &BTRFS_I(inode)->runtime_flags))
4450                 drop_inode_space = 1;
4451
4452         /*
4453          * If we have at least as many outstanding extents as reserved
4454          * extents then we need to leave the reserved extents count alone.
4455          */
4456         if (BTRFS_I(inode)->outstanding_extents >=
4457             BTRFS_I(inode)->reserved_extents)
4458                 return drop_inode_space;
4459
4460         dropped_extents = BTRFS_I(inode)->reserved_extents -
4461                 BTRFS_I(inode)->outstanding_extents;
4462         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4463         return dropped_extents + drop_inode_space;
4464 }
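
/*
 * Usage sketch (illustrative, mirroring the real callers further down in
 * this file): the drop happens under the inode lock, and the returned
 * extent count is converted to bytes afterwards:
 *
 *      spin_lock(&BTRFS_I(inode)->lock);
 *      dropped = drop_outstanding_extent(inode);
 *      spin_unlock(&BTRFS_I(inode)->lock);
 *      if (dropped)
 *              to_free += btrfs_calc_trans_metadata_size(root, dropped);
 */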
4465
4466 /**
4467  * calc_csum_metadata_size - return the amount of metadata space that must be
4468  *      reserved/freed for the given bytes.
4469  * @inode: the inode we're manipulating
4470  * @num_bytes: the number of bytes in question
4471  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4472  *
4473  * This adjusts the number of csum_bytes in the inode and then returns the
4474  * correct amount of metadata that must either be reserved or freed.  We
4475  * calculate how many checksums we can fit into one leaf and then divide the
4476  * number of bytes that will need to be checksummed by this value to figure out
4477  * how many checksums will be required.  If we are adding bytes then the number
4478  * may go up and we will return the number of additional bytes that must be
4479  * reserved.  If it is going down we will return the number of bytes that must
4480  * be freed.
4481  *
4482  * This must be called with BTRFS_I(inode)->lock held.
4483  */
4484 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4485                                    int reserve)
4486 {
4487         struct btrfs_root *root = BTRFS_I(inode)->root;
4488         u64 csum_size;
4489         int num_csums_per_leaf;
4490         int num_csums;
4491         int old_csums;
4492
4493         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4494             BTRFS_I(inode)->csum_bytes == 0)
4495                 return 0;
4496
4497         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4498         if (reserve)
4499                 BTRFS_I(inode)->csum_bytes += num_bytes;
4500         else
4501                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4502         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4503         num_csums_per_leaf = (int)div64_u64(csum_size,
4504                                             sizeof(struct btrfs_csum_item) +
4505                                             sizeof(struct btrfs_disk_key));
4506         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4507         num_csums = num_csums + num_csums_per_leaf - 1;
4508         num_csums = num_csums / num_csums_per_leaf;
4509
4510         old_csums = old_csums + num_csums_per_leaf - 1;
4511         old_csums = old_csums / num_csums_per_leaf;
4512
4513         /* No change, no need to reserve more */
4514         if (old_csums == num_csums)
4515                 return 0;
4516
4517         if (reserve)
4518                 return btrfs_calc_trans_metadata_size(root,
4519                                                       num_csums - old_csums);
4520
4521         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4522 }
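
/*
 * Worked example (a sketch with made-up numbers): suppose the estimate
 * above yields 200 csums per leaf.  Going from 199 to 200 sectors worth
 * of csum_bytes stays within one leaf (old_csums == num_csums), so 0 is
 * returned; going from 200 to 201 sectors crosses a leaf boundary and
 * returns btrfs_calc_trans_metadata_size(root, 1).
 */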
4523
4524 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4525 {
4526         struct btrfs_root *root = BTRFS_I(inode)->root;
4527         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4528         u64 to_reserve = 0;
4529         u64 csum_bytes;
4530         unsigned nr_extents = 0;
4531         int extra_reserve = 0;
4532         int flush = 1;
4533         int ret;
4534
4535         /* Need to be holding the i_mutex here if we aren't the free space inode */
4536         if (btrfs_is_free_space_inode(root, inode))
4537                 flush = 0;
4538
4539         if (flush && btrfs_transaction_in_commit(root->fs_info))
4540                 schedule_timeout(1);
4541
4542         mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4543         num_bytes = ALIGN(num_bytes, root->sectorsize);
4544
4545         spin_lock(&BTRFS_I(inode)->lock);
4546         BTRFS_I(inode)->outstanding_extents++;
4547
4548         if (BTRFS_I(inode)->outstanding_extents >
4549             BTRFS_I(inode)->reserved_extents)
4550                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4551                         BTRFS_I(inode)->reserved_extents;
4552
4553         /*
4554          * Add an item to reserve for updating the inode when we complete the
4555          * delalloc io.
4556          */
4557         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4558                       &BTRFS_I(inode)->runtime_flags)) {
4559                 nr_extents++;
4560                 extra_reserve = 1;
4561         }
4562
4563         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4564         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4565         csum_bytes = BTRFS_I(inode)->csum_bytes;
4566         spin_unlock(&BTRFS_I(inode)->lock);
4567
4568         if (root->fs_info->quota_enabled) {
4569                 ret = btrfs_qgroup_reserve(root, num_bytes +
4570                                            nr_extents * root->leafsize);
4571                 if (ret)
4572                         return ret;
4573         }
4574
4575         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4576         if (ret) {
4577                 u64 to_free = 0;
4578                 unsigned dropped;
4579
4580                 spin_lock(&BTRFS_I(inode)->lock);
4581                 dropped = drop_outstanding_extent(inode);
4582                 /*
4583                  * If the inode's csum_bytes is the same as the original
4584                  * csum_bytes then we know we haven't raced with any free()ers,
4585                  * so we can just reduce our inode's csum_bytes and carry on.
4586                  * Otherwise we have to do the normal free thing to account for
4587                  * the case that the free side didn't free up its reserve
4588                  * because of this outstanding reservation.
4589                  */
4590                 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4591                         calc_csum_metadata_size(inode, num_bytes, 0);
4592                 else
4593                         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4594                 spin_unlock(&BTRFS_I(inode)->lock);
4595                 if (dropped)
4596                         to_free += btrfs_calc_trans_metadata_size(root, dropped);
4597
4598                 if (to_free) {
4599                         btrfs_block_rsv_release(root, block_rsv, to_free);
4600                         trace_btrfs_space_reservation(root->fs_info,
4601                                                       "delalloc",
4602                                                       btrfs_ino(inode),
4603                                                       to_free, 0);
4604                 }
4605                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4606                 return ret;
4607         }
4608
4609         spin_lock(&BTRFS_I(inode)->lock);
4610         if (extra_reserve) {
4611                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4612                         &BTRFS_I(inode)->runtime_flags);
4613                 nr_extents--;
4614         }
4615         BTRFS_I(inode)->reserved_extents += nr_extents;
4616         spin_unlock(&BTRFS_I(inode)->lock);
4617         mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4618
4619         if (to_reserve)
4620                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4621                                               btrfs_ino(inode), to_reserve, 1);
4622         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4623
4624         return 0;
4625 }
4626
4627 /**
4628  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4629  * @inode: the inode to release the reservation for
4630  * @num_bytes: the number of bytes we're releasing
4631  *
4632  * This will release the metadata reservation for an inode.  This can be called
4633  * once we complete IO for a given set of bytes to release their metadata
4634  * reservations.
4635  */
4636 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4637 {
4638         struct btrfs_root *root = BTRFS_I(inode)->root;
4639         u64 to_free = 0;
4640         unsigned dropped;
4641
4642         num_bytes = ALIGN(num_bytes, root->sectorsize);
4643         spin_lock(&BTRFS_I(inode)->lock);
4644         dropped = drop_outstanding_extent(inode);
4645
4646         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4647         spin_unlock(&BTRFS_I(inode)->lock);
4648         if (dropped > 0)
4649                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4650
4651         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4652                                       btrfs_ino(inode), to_free, 0);
4653         if (root->fs_info->quota_enabled) {
4654                 btrfs_qgroup_free(root, num_bytes +
4655                                         dropped * root->leafsize);
4656         }
4657
4658         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4659                                 to_free);
4660 }
4661
4662 /**
4663  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4664  * @inode: inode we're writing to
4665  * @num_bytes: the number of bytes we want to allocate
4666  *
4667  * This will do the following things
4668  *
4669  * o reserve space in the data space info for num_bytes
4670  * o reserve space in the metadata space info based on number of outstanding
4671  *   extents and how much csum space will be needed
4672  * o add to the inode's ->delalloc_bytes
4673  * o add it to the fs_info's delalloc inodes list.
4674  *
4675  * This will return 0 for success and -ENOSPC if there is no space left.
4676  */
4677 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4678 {
4679         int ret;
4680
4681         ret = btrfs_check_data_free_space(inode, num_bytes);
4682         if (ret)
4683                 return ret;
4684
4685         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4686         if (ret) {
4687                 btrfs_free_reserved_data_space(inode, num_bytes);
4688                 return ret;
4689         }
4690
4691         return 0;
4692 }
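
/*
 * Illustrative caller (a sketch only, not a real call site; the actual
 * callers live in the write paths, and example_dirty_pages() is
 * hypothetical): reserve before dirtying pages, release on failure.
 */
#if 0
static int example_prepare_write(struct inode *inode, loff_t pos, size_t len)
{
        u64 bytes = round_up(pos + len, PAGE_CACHE_SIZE) -
                    round_down(pos, PAGE_CACHE_SIZE);
        int ret;

        ret = btrfs_delalloc_reserve_space(inode, bytes);
        if (ret)
                return ret;     /* typically -ENOSPC */

        ret = example_dirty_pages(inode, pos, len);
        if (ret)
                btrfs_delalloc_release_space(inode, bytes);
        return ret;
}
#endif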
4693
4694 /**
4695  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4696  * @inode: inode we're releasing space for
4697  * @num_bytes: the number of bytes we want to free up
4698  *
4699  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4700  * called in the case that we don't need the metadata AND data reservations
4701  * anymore, e.g. if there is an error or we insert an inline extent.
4702  *
4703  * This function will release the metadata space that was not used and will
4704  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4705  * list if there are no delalloc bytes left.
4706  */
4707 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4708 {
4709         btrfs_delalloc_release_metadata(inode, num_bytes);
4710         btrfs_free_reserved_data_space(inode, num_bytes);
4711 }
4712
4713 static int update_block_group(struct btrfs_trans_handle *trans,
4714                               struct btrfs_root *root,
4715                               u64 bytenr, u64 num_bytes, int alloc)
4716 {
4717         struct btrfs_block_group_cache *cache = NULL;
4718         struct btrfs_fs_info *info = root->fs_info;
4719         u64 total = num_bytes;
4720         u64 old_val;
4721         u64 byte_in_group;
4722         int factor;
4723
4724         /* block accounting for super block */
4725         spin_lock(&info->delalloc_lock);
4726         old_val = btrfs_super_bytes_used(info->super_copy);
4727         if (alloc)
4728                 old_val += num_bytes;
4729         else
4730                 old_val -= num_bytes;
4731         btrfs_set_super_bytes_used(info->super_copy, old_val);
4732         spin_unlock(&info->delalloc_lock);
4733
4734         while (total) {
4735                 cache = btrfs_lookup_block_group(info, bytenr);
4736                 if (!cache)
4737                         return -ENOENT;
4738                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4739                                     BTRFS_BLOCK_GROUP_RAID1 |
4740                                     BTRFS_BLOCK_GROUP_RAID10))
4741                         factor = 2;
4742                 else
4743                         factor = 1;
4744                 /*
4745                  * If this block group has free space cache written out, we
4746                  * need to make sure to load it if we are removing space.  This
4747                  * is because we need the unpinning stage to actually add the
4748                  * space back to the block group, otherwise we will leak space.
4749                  */
4750                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4751                         cache_block_group(cache, trans, NULL, 1);
4752
4753                 byte_in_group = bytenr - cache->key.objectid;
4754                 WARN_ON(byte_in_group > cache->key.offset);
4755
4756                 spin_lock(&cache->space_info->lock);
4757                 spin_lock(&cache->lock);
4758
4759                 if (btrfs_test_opt(root, SPACE_CACHE) &&
4760                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4761                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4762
4763                 cache->dirty = 1;
4764                 old_val = btrfs_block_group_used(&cache->item);
4765                 num_bytes = min(total, cache->key.offset - byte_in_group);
4766                 if (alloc) {
4767                         old_val += num_bytes;
4768                         btrfs_set_block_group_used(&cache->item, old_val);
4769                         cache->reserved -= num_bytes;
4770                         cache->space_info->bytes_reserved -= num_bytes;
4771                         cache->space_info->bytes_used += num_bytes;
4772                         cache->space_info->disk_used += num_bytes * factor;
4773                         spin_unlock(&cache->lock);
4774                         spin_unlock(&cache->space_info->lock);
4775                 } else {
4776                         old_val -= num_bytes;
4777                         btrfs_set_block_group_used(&cache->item, old_val);
4778                         cache->pinned += num_bytes;
4779                         cache->space_info->bytes_pinned += num_bytes;
4780                         cache->space_info->bytes_used -= num_bytes;
4781                         cache->space_info->disk_used -= num_bytes * factor;
4782                         spin_unlock(&cache->lock);
4783                         spin_unlock(&cache->space_info->lock);
4784
4785                         set_extent_dirty(info->pinned_extents,
4786                                          bytenr, bytenr + num_bytes - 1,
4787                                          GFP_NOFS | __GFP_NOFAIL);
4788                 }
4789                 btrfs_put_block_group(cache);
4790                 total -= num_bytes;
4791                 bytenr += num_bytes;
4792         }
4793         return 0;
4794 }
4795
4796 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4797 {
4798         struct btrfs_block_group_cache *cache;
4799         u64 bytenr;
4800
4801         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4802         if (!cache)
4803                 return 0;
4804
4805         bytenr = cache->key.objectid;
4806         btrfs_put_block_group(cache);
4807
4808         return bytenr;
4809 }
4810
4811 static int pin_down_extent(struct btrfs_root *root,
4812                            struct btrfs_block_group_cache *cache,
4813                            u64 bytenr, u64 num_bytes, int reserved)
4814 {
4815         spin_lock(&cache->space_info->lock);
4816         spin_lock(&cache->lock);
4817         cache->pinned += num_bytes;
4818         cache->space_info->bytes_pinned += num_bytes;
4819         if (reserved) {
4820                 cache->reserved -= num_bytes;
4821                 cache->space_info->bytes_reserved -= num_bytes;
4822         }
4823         spin_unlock(&cache->lock);
4824         spin_unlock(&cache->space_info->lock);
4825
4826         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4827                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4828         return 0;
4829 }
4830
4831 /*
4832  * this function must be called within transaction
4833  */
4834 int btrfs_pin_extent(struct btrfs_root *root,
4835                      u64 bytenr, u64 num_bytes, int reserved)
4836 {
4837         struct btrfs_block_group_cache *cache;
4838
4839         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4840         BUG_ON(!cache); /* Logic error */
4841
4842         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4843
4844         btrfs_put_block_group(cache);
4845         return 0;
4846 }
4847
4848 /*
4849  * this function must be called within transaction
4850  */
4851 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4852                                     struct btrfs_root *root,
4853                                     u64 bytenr, u64 num_bytes)
4854 {
4855         struct btrfs_block_group_cache *cache;
4856
4857         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4858         BUG_ON(!cache); /* Logic error */
4859
4860         /*
4861          * pull in the free space cache (if any) so that our pin
4862          * removes the free space from the cache.  We have load_only set
4863          * to one because the slow code to read in the free extents does check
4864          * the pinned extents.
4865          */
4866         cache_block_group(cache, trans, root, 1);
4867
4868         pin_down_extent(root, cache, bytenr, num_bytes, 0);
4869
4870         /* remove us from the free space cache (if we're there at all) */
4871         btrfs_remove_free_space(cache, bytenr, num_bytes);
4872         btrfs_put_block_group(cache);
4873         return 0;
4874 }
4875
4876 /**
4877  * btrfs_update_reserved_bytes - update the block_group and space info counters
4878  * @cache:      The cache we are manipulating
4879  * @num_bytes:  The number of bytes in question
4880  * @reserve:    One of the reservation enums
4881  *
4882  * This is called by the allocator when it reserves space, or by somebody who is
4883  * freeing space that was never actually used on disk.  For example if you
4884  * reserve some space for a new leaf in transaction A and before transaction A
4885  * commits you free that leaf, you call this with reserve set to RESERVE_FREE
4886  * in order to clear the reservation.
4887  *
4888  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
4889  * ENOSPC accounting.  For data we handle the reservation through clearing the
4890  * delalloc bits in the io_tree.  We have to do this since we could end up
4891  * allocating less disk space for the amount of data we have reserved in the
4892  * case of compression.
4893  *
4894  * If this is a reservation and the block group has become read only we cannot
4895  * make the reservation and return -EAGAIN, otherwise this function always
4896  * succeeds.
4897  */
4898 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4899                                        u64 num_bytes, int reserve)
4900 {
4901         struct btrfs_space_info *space_info = cache->space_info;
4902         int ret = 0;
4903
4904         spin_lock(&space_info->lock);
4905         spin_lock(&cache->lock);
4906         if (reserve != RESERVE_FREE) {
4907                 if (cache->ro) {
4908                         ret = -EAGAIN;
4909                 } else {
4910                         cache->reserved += num_bytes;
4911                         space_info->bytes_reserved += num_bytes;
4912                         if (reserve == RESERVE_ALLOC) {
4913                                 trace_btrfs_space_reservation(cache->fs_info,
4914                                                 "space_info", space_info->flags,
4915                                                 num_bytes, 0);
4916                                 space_info->bytes_may_use -= num_bytes;
4917                         }
4918                 }
4919         } else {
4920                 if (cache->ro)
4921                         space_info->bytes_readonly += num_bytes;
4922                 cache->reserved -= num_bytes;
4923                 space_info->bytes_reserved -= num_bytes;
4924                 space_info->reservation_progress++;
4925         }
4926         spin_unlock(&cache->lock);
4927         spin_unlock(&space_info->lock);
4928         return ret;
4929 }
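
/*
 * Illustrative pairing (sketch only): the allocator reserves with
 * RESERVE_ALLOC (or RESERVE_ALLOC_NO_ACCOUNT for data), and a caller
 * that frees the space without ever writing it gives it back with
 * RESERVE_FREE, e.g.:
 *
 *      btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC);
 *      ...the new leaf is freed before the transaction commits...
 *      btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
 */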
4930
4931 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4932                                 struct btrfs_root *root)
4933 {
4934         struct btrfs_fs_info *fs_info = root->fs_info;
4935         struct btrfs_caching_control *next;
4936         struct btrfs_caching_control *caching_ctl;
4937         struct btrfs_block_group_cache *cache;
4938
4939         down_write(&fs_info->extent_commit_sem);
4940
4941         list_for_each_entry_safe(caching_ctl, next,
4942                                  &fs_info->caching_block_groups, list) {
4943                 cache = caching_ctl->block_group;
4944                 if (block_group_cache_done(cache)) {
4945                         cache->last_byte_to_unpin = (u64)-1;
4946                         list_del_init(&caching_ctl->list);
4947                         put_caching_control(caching_ctl);
4948                 } else {
4949                         cache->last_byte_to_unpin = caching_ctl->progress;
4950                 }
4951         }
4952
4953         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4954                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4955         else
4956                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4957
4958         up_write(&fs_info->extent_commit_sem);
4959
4960         update_global_block_rsv(fs_info);
4961 }
4962
4963 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4964 {
4965         struct btrfs_fs_info *fs_info = root->fs_info;
4966         struct btrfs_block_group_cache *cache = NULL;
4967         u64 len;
4968
4969         while (start <= end) {
4970                 if (!cache ||
4971                     start >= cache->key.objectid + cache->key.offset) {
4972                         if (cache)
4973                                 btrfs_put_block_group(cache);
4974                         cache = btrfs_lookup_block_group(fs_info, start);
4975                         BUG_ON(!cache); /* Logic error */
4976                 }
4977
4978                 len = cache->key.objectid + cache->key.offset - start;
4979                 len = min(len, end + 1 - start);
4980
4981                 if (start < cache->last_byte_to_unpin) {
4982                         len = min(len, cache->last_byte_to_unpin - start);
4983                         btrfs_add_free_space(cache, start, len);
4984                 }
4985
4986                 start += len;
4987
4988                 spin_lock(&cache->space_info->lock);
4989                 spin_lock(&cache->lock);
4990                 cache->pinned -= len;
4991                 cache->space_info->bytes_pinned -= len;
4992                 if (cache->ro)
4993                         cache->space_info->bytes_readonly += len;
4994                 spin_unlock(&cache->lock);
4995                 spin_unlock(&cache->space_info->lock);
4996         }
4997
4998         if (cache)
4999                 btrfs_put_block_group(cache);
5000         return 0;
5001 }
5002
5003 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5004                                struct btrfs_root *root)
5005 {
5006         struct btrfs_fs_info *fs_info = root->fs_info;
5007         struct extent_io_tree *unpin;
5008         u64 start;
5009         u64 end;
5010         int ret;
5011
5012         if (trans->aborted)
5013                 return 0;
5014
5015         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5016                 unpin = &fs_info->freed_extents[1];
5017         else
5018                 unpin = &fs_info->freed_extents[0];
5019
5020         while (1) {
5021                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5022                                             EXTENT_DIRTY);
5023                 if (ret)
5024                         break;
5025
5026                 if (btrfs_test_opt(root, DISCARD))
5027                         ret = btrfs_discard_extent(root, start,
5028                                                    end + 1 - start, NULL);
5029
5030                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5031                 unpin_extent_range(root, start, end);
5032                 cond_resched();
5033         }
5034
5035         return 0;
5036 }
5037
5038 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5039                                 struct btrfs_root *root,
5040                                 u64 bytenr, u64 num_bytes, u64 parent,
5041                                 u64 root_objectid, u64 owner_objectid,
5042                                 u64 owner_offset, int refs_to_drop,
5043                                 struct btrfs_delayed_extent_op *extent_op)
5044 {
5045         struct btrfs_key key;
5046         struct btrfs_path *path;
5047         struct btrfs_fs_info *info = root->fs_info;
5048         struct btrfs_root *extent_root = info->extent_root;
5049         struct extent_buffer *leaf;
5050         struct btrfs_extent_item *ei;
5051         struct btrfs_extent_inline_ref *iref;
5052         int ret;
5053         int is_data;
5054         int extent_slot = 0;
5055         int found_extent = 0;
5056         int num_to_del = 1;
5057         u32 item_size;
5058         u64 refs;
5059
5060         path = btrfs_alloc_path();
5061         if (!path)
5062                 return -ENOMEM;
5063
5064         path->reada = 1;
5065         path->leave_spinning = 1;
5066
5067         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5068         BUG_ON(!is_data && refs_to_drop != 1);
5069
5070         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5071                                     bytenr, num_bytes, parent,
5072                                     root_objectid, owner_objectid,
5073                                     owner_offset);
5074         if (ret == 0) {
5075                 extent_slot = path->slots[0];
5076                 while (extent_slot >= 0) {
5077                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5078                                               extent_slot);
5079                         if (key.objectid != bytenr)
5080                                 break;
5081                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5082                             key.offset == num_bytes) {
5083                                 found_extent = 1;
5084                                 break;
5085                         }
5086                         if (path->slots[0] - extent_slot > 5)
5087                                 break;
5088                         extent_slot--;
5089                 }
5090 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5091                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5092                 if (found_extent && item_size < sizeof(*ei))
5093                         found_extent = 0;
5094 #endif
5095                 if (!found_extent) {
5096                         BUG_ON(iref);
5097                         ret = remove_extent_backref(trans, extent_root, path,
5098                                                     NULL, refs_to_drop,
5099                                                     is_data);
5100                         if (ret)
5101                                 goto abort;
5102                         btrfs_release_path(path);
5103                         path->leave_spinning = 1;
5104
5105                         key.objectid = bytenr;
5106                         key.type = BTRFS_EXTENT_ITEM_KEY;
5107                         key.offset = num_bytes;
5108
5109                         ret = btrfs_search_slot(trans, extent_root,
5110                                                 &key, path, -1, 1);
5111                         if (ret) {
5112                                 printk(KERN_ERR "umm, got %d back from search"
5113                                        ", was looking for %llu\n", ret,
5114                                        (unsigned long long)bytenr);
5115                                 if (ret > 0)
5116                                         btrfs_print_leaf(extent_root,
5117                                                          path->nodes[0]);
5118                         }
5119                         if (ret < 0)
5120                                 goto abort;
5121                         extent_slot = path->slots[0];
5122                 }
5123         } else if (ret == -ENOENT) {
5124                 btrfs_print_leaf(extent_root, path->nodes[0]);
5125                 WARN_ON(1);
5126                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5127                        "parent %llu root %llu  owner %llu offset %llu\n",
5128                        (unsigned long long)bytenr,
5129                        (unsigned long long)parent,
5130                        (unsigned long long)root_objectid,
5131                        (unsigned long long)owner_objectid,
5132                        (unsigned long long)owner_offset);
5133         } else {
5134                 goto abort;
5135         }
5136
5137         leaf = path->nodes[0];
5138         item_size = btrfs_item_size_nr(leaf, extent_slot);
5139 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5140         if (item_size < sizeof(*ei)) {
5141                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5142                 ret = convert_extent_item_v0(trans, extent_root, path,
5143                                              owner_objectid, 0);
5144                 if (ret < 0)
5145                         goto abort;
5146
5147                 btrfs_release_path(path);
5148                 path->leave_spinning = 1;
5149
5150                 key.objectid = bytenr;
5151                 key.type = BTRFS_EXTENT_ITEM_KEY;
5152                 key.offset = num_bytes;
5153
5154                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5155                                         -1, 1);
5156                 if (ret) {
5157                         printk(KERN_ERR "umm, got %d back from search"
5158                                ", was looking for %llu\n", ret,
5159                                (unsigned long long)bytenr);
5160                         btrfs_print_leaf(extent_root, path->nodes[0]);
5161                 }
5162                 if (ret < 0)
5163                         goto abort;
5164                 extent_slot = path->slots[0];
5165                 leaf = path->nodes[0];
5166                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5167         }
5168 #endif
5169         BUG_ON(item_size < sizeof(*ei));
5170         ei = btrfs_item_ptr(leaf, extent_slot,
5171                             struct btrfs_extent_item);
5172         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5173                 struct btrfs_tree_block_info *bi;
5174                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5175                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5176                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5177         }
5178
5179         refs = btrfs_extent_refs(leaf, ei);
5180         BUG_ON(refs < refs_to_drop);
5181         refs -= refs_to_drop;
5182
5183         if (refs > 0) {
5184                 if (extent_op)
5185                         __run_delayed_extent_op(extent_op, leaf, ei);
5186                 /*
5187                  * In the case of inline back ref, reference count will
5188                  * be updated by remove_extent_backref
5189                  */
5190                 if (iref) {
5191                         BUG_ON(!found_extent);
5192                 } else {
5193                         btrfs_set_extent_refs(leaf, ei, refs);
5194                         btrfs_mark_buffer_dirty(leaf);
5195                 }
5196                 if (found_extent) {
5197                         ret = remove_extent_backref(trans, extent_root, path,
5198                                                     iref, refs_to_drop,
5199                                                     is_data);
5200                         if (ret)
5201                                 goto abort;
5202                 }
5203         } else {
5204                 if (found_extent) {
5205                         BUG_ON(is_data && refs_to_drop !=
5206                                extent_data_ref_count(root, path, iref));
5207                         if (iref) {
5208                                 BUG_ON(path->slots[0] != extent_slot);
5209                         } else {
5210                                 BUG_ON(path->slots[0] != extent_slot + 1);
5211                                 path->slots[0] = extent_slot;
5212                                 num_to_del = 2;
5213                         }
5214                 }
5215
5216                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5217                                       num_to_del);
5218                 if (ret)
5219                         goto abort;
5220                 btrfs_release_path(path);
5221
5222                 if (is_data) {
5223                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5224                         if (ret)
5225                                 goto abort;
5226                 }
5227
5228                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5229                 if (ret)
5230                         goto abort;
5231         }
5232 out:
5233         btrfs_free_path(path);
5234         return ret;
5235
5236 abort:
5237         btrfs_abort_transaction(trans, extent_root, ret);
5238         goto out;
5239 }
5240
5241 /*
5242  * when we free a block, it is possible (and likely) that we free the last
5243  * delayed ref for that extent as well.  This searches the delayed ref tree for
5244  * a given extent, and if there are no other delayed refs to be processed, it
5245  * removes it from the tree.
5246  */
5247 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5248                                       struct btrfs_root *root, u64 bytenr)
5249 {
5250         struct btrfs_delayed_ref_head *head;
5251         struct btrfs_delayed_ref_root *delayed_refs;
5252         struct btrfs_delayed_ref_node *ref;
5253         struct rb_node *node;
5254         int ret = 0;
5255
5256         delayed_refs = &trans->transaction->delayed_refs;
5257         spin_lock(&delayed_refs->lock);
5258         head = btrfs_find_delayed_ref_head(trans, bytenr);
5259         if (!head)
5260                 goto out;
5261
5262         node = rb_prev(&head->node.rb_node);
5263         if (!node)
5264                 goto out;
5265
5266         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5267
5268         /* there are still entries for this ref, we can't drop it */
5269         if (ref->bytenr == bytenr)
5270                 goto out;
5271
5272         if (head->extent_op) {
5273                 if (!head->must_insert_reserved)
5274                         goto out;
5275                 kfree(head->extent_op);
5276                 head->extent_op = NULL;
5277         }
5278
5279         /*
5280          * waiting for the lock here would deadlock.  If someone else has it
5281          * locked they are already in the process of dropping it anyway
5282          */
5283         if (!mutex_trylock(&head->mutex))
5284                 goto out;
5285
5286         /*
5287          * at this point we have a head with no other entries.  Go
5288          * ahead and process it.
5289          */
5290         head->node.in_tree = 0;
5291         rb_erase(&head->node.rb_node, &delayed_refs->root);
5292
5293         delayed_refs->num_entries--;
5294         if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
5295                 wake_up(&root->fs_info->tree_mod_seq_wait);
5296
5297         /*
5298          * we don't take a ref on the node because we're removing it from the
5299          * tree, so we just steal the ref the tree was holding.
5300          */
5301         delayed_refs->num_heads--;
5302         if (list_empty(&head->cluster))
5303                 delayed_refs->num_heads_ready--;
5304
5305         list_del_init(&head->cluster);
5306         spin_unlock(&delayed_refs->lock);
5307
5308         BUG_ON(head->extent_op);
5309         if (head->must_insert_reserved)
5310                 ret = 1;
5311
5312         mutex_unlock(&head->mutex);
5313         btrfs_put_delayed_ref(&head->node);
5314         return ret;
5315 out:
5316         spin_unlock(&delayed_refs->lock);
5317         return 0;
5318 }
5319
5320 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5321                            struct btrfs_root *root,
5322                            struct extent_buffer *buf,
5323                            u64 parent, int last_ref)
5324 {
5325         struct btrfs_block_group_cache *cache = NULL;
5326         int ret;
5327
5328         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5329                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5330                                         buf->start, buf->len,
5331                                         parent, root->root_key.objectid,
5332                                         btrfs_header_level(buf),
5333                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5334                 BUG_ON(ret); /* -ENOMEM */
5335         }
5336
5337         if (!last_ref)
5338                 return;
5339
5340         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5341
5342         if (btrfs_header_generation(buf) == trans->transid) {
5343                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5344                         ret = check_ref_cleanup(trans, root, buf->start);
5345                         if (!ret)
5346                                 goto out;
5347                 }
5348
5349                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5350                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5351                         goto out;
5352                 }
5353
5354                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5355
5356                 btrfs_add_free_space(cache, buf->start, buf->len);
5357                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5358         }
5359 out:
5360         /*
5361          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5362          * anymore.
5363          */
5364         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5365         btrfs_put_block_group(cache);
5366 }
5367
5368 /* Can return -ENOMEM */
5369 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5370                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5371                       u64 owner, u64 offset, int for_cow)
5372 {
5373         int ret;
5374         struct btrfs_fs_info *fs_info = root->fs_info;
5375
5376         /*
5377          * tree log blocks never actually go into the extent allocation
5378          * tree, just update pinning info and exit early.
5379          */
5380         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5381                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5382                 /* unlocks the pinned mutex */
5383                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5384                 ret = 0;
5385         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5386                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5387                                         num_bytes,
5388                                         parent, root_objectid, (int)owner,
5389                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5390         } else {
5391                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5392                                                 num_bytes,
5393                                                 parent, root_objectid, owner,
5394                                                 offset, BTRFS_DROP_DELAYED_REF,
5395                                                 NULL, for_cow);
5396         }
5397         return ret;
5398 }
5399
5400 static u64 stripe_align(struct btrfs_root *root, u64 val)
5401 {
5402         u64 mask = ((u64)root->stripesize - 1);
5403         u64 ret = (val + mask) & ~mask;
5404         return ret;
5405 }
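
/*
 * Example (hypothetical values): with a 64K stripesize, mask is 0xffff,
 * so stripe_align(root, 0x10000) stays 0x10000 while
 * stripe_align(root, 0x10001) becomes 0x20000; (val + mask) & ~mask
 * rounds val up to the next stripe boundary.
 */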
5406
5407 /*
5408  * when we wait for progress in the block group caching, it's because
5409  * our allocation attempt failed at least once.  So, we must sleep
5410  * and let some progress happen before we try again.
5411  *
5412  * This function will sleep at least once waiting for new free space to
5413  * show up, and then it will check the block group free space numbers
5414  * for our min num_bytes.  Another option is to have it go ahead
5415  * and look in the rbtree for a free extent of a given size, but this
5416  * is a good start.
5417  */
5418 static noinline int
5419 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5420                                 u64 num_bytes)
5421 {
5422         struct btrfs_caching_control *caching_ctl;
5423         DEFINE_WAIT(wait);
5424
5425         caching_ctl = get_caching_control(cache);
5426         if (!caching_ctl)
5427                 return 0;
5428
5429         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5430                    (cache->free_space_ctl->free_space >= num_bytes));
5431
5432         put_caching_control(caching_ctl);
5433         return 0;
5434 }
5435
5436 static noinline int
5437 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5438 {
5439         struct btrfs_caching_control *caching_ctl;
5440         DEFINE_WAIT(wait);
5441
5442         caching_ctl = get_caching_control(cache);
5443         if (!caching_ctl)
5444                 return 0;
5445
5446         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5447
5448         put_caching_control(caching_ctl);
5449         return 0;
5450 }
5451
5452 static int __get_block_group_index(u64 flags)
5453 {
5454         int index;
5455
5456         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5457                 index = 0;
5458         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5459                 index = 1;
5460         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5461                 index = 2;
5462         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5463                 index = 3;
5464         else
5465                 index = 4;
5466
5467         return index;
5468 }
5469
5470 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5471 {
5472         return __get_block_group_index(cache->flags);
5473 }
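/*
 * The index computed above selects which of the per-raid-type lists in
 * space_info->block_groups[] a block group lives on; find_free_extent()
 * below walks those lists in this order:
 *
 *   0 RAID10, 1 RAID1, 2 DUP, 3 RAID0, 4 single
 */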
5474
5475 enum btrfs_loop_type {
5476         LOOP_CACHING_NOWAIT = 0,
5477         LOOP_CACHING_WAIT = 1,
5478         LOOP_ALLOC_CHUNK = 2,
5479         LOOP_NO_EMPTY_SIZE = 3,
5480 };
5481
5482 /*
5483  * walks the btree of allocated extents and finds a hole of a given size.
5484  * The key ins is changed to record the hole:
5485  * ins->objectid == block start
5486  * ins->flags == BTRFS_EXTENT_ITEM_KEY
5487  * ins->offset == number of blocks
5488  * Any available blocks before search_start are skipped.
5489  */
5490 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5491                                      struct btrfs_root *orig_root,
5492                                      u64 num_bytes, u64 empty_size,
5493                                      u64 hint_byte, struct btrfs_key *ins,
5494                                      u64 data)
5495 {
5496         int ret = 0;
5497         struct btrfs_root *root = orig_root->fs_info->extent_root;
5498         struct btrfs_free_cluster *last_ptr = NULL;
5499         struct btrfs_block_group_cache *block_group = NULL;
5500         struct btrfs_block_group_cache *used_block_group;
5501         u64 search_start = 0;
5502         int empty_cluster = 2 * 1024 * 1024;
5503         int allowed_chunk_alloc = 0;
5504         int done_chunk_alloc = 0;
5505         struct btrfs_space_info *space_info;
5506         int loop = 0;
5507         int index = 0;
5508         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5509                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5510         bool found_uncached_bg = false;
5511         bool failed_cluster_refill = false;
5512         bool failed_alloc = false;
5513         bool use_cluster = true;
5514         bool have_caching_bg = false;
5515
5516         WARN_ON(num_bytes < root->sectorsize);
5517         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5518         ins->objectid = 0;
5519         ins->offset = 0;
5520
5521         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5522
5523         space_info = __find_space_info(root->fs_info, data);
5524         if (!space_info) {
5525                 printk(KERN_ERR "No space info for %llu\n", data);
5526                 return -ENOSPC;
5527         }
5528
5529         /*
5530          * If the space info is for both data and metadata it means we have a
5531          * small filesystem and we can't use the clustering stuff.
5532          */
5533         if (btrfs_mixed_space_info(space_info))
5534                 use_cluster = false;
5535
5536         if (orig_root->ref_cows || empty_size)
5537                 allowed_chunk_alloc = 1;
5538
5539         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5540                 last_ptr = &root->fs_info->meta_alloc_cluster;
5541                 if (!btrfs_test_opt(root, SSD))
5542                         empty_cluster = 64 * 1024;
5543         }
5544
5545         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5546             btrfs_test_opt(root, SSD)) {
5547                 last_ptr = &root->fs_info->data_alloc_cluster;
5548         }
5549
5550         if (last_ptr) {
5551                 spin_lock(&last_ptr->lock);
5552                 if (last_ptr->block_group)
5553                         hint_byte = last_ptr->window_start;
5554                 spin_unlock(&last_ptr->lock);
5555         }
5556
5557         search_start = max(search_start, first_logical_byte(root, 0));
5558         search_start = max(search_start, hint_byte);
5559
5560         if (!last_ptr)
5561                 empty_cluster = 0;
5562
5563         if (search_start == hint_byte) {
5564                 block_group = btrfs_lookup_block_group(root->fs_info,
5565                                                        search_start);
5566                 used_block_group = block_group;
5567                 /*
5568                  * we don't want to use the block group if it doesn't match our
5569                  * allocation bits, or if it's not cached.
5570                  *
5571                  * However if we are re-searching with an ideal block group
5572                  * picked out then we don't care that the block group is cached.
5573                  */
5574                 if (block_group && block_group_bits(block_group, data) &&
5575                     block_group->cached != BTRFS_CACHE_NO) {
5576                         down_read(&space_info->groups_sem);
5577                         if (list_empty(&block_group->list) ||
5578                             block_group->ro) {
5579                                 /*
5580                                  * someone is removing this block group,
5581                                  * we can't jump into the have_block_group
5582                                  * target because our list pointers are not
5583                                  * valid
5584                                  */
5585                                 btrfs_put_block_group(block_group);
5586                                 up_read(&space_info->groups_sem);
5587                         } else {
5588                                 index = get_block_group_index(block_group);
5589                                 goto have_block_group;
5590                         }
5591                 } else if (block_group) {
5592                         btrfs_put_block_group(block_group);
5593                 }
5594         }
5595 search:
5596         have_caching_bg = false;
5597         down_read(&space_info->groups_sem);
5598         list_for_each_entry(block_group, &space_info->block_groups[index],
5599                             list) {
5600                 u64 offset;
5601                 int cached;
5602
5603                 used_block_group = block_group;
5604                 btrfs_get_block_group(block_group);
5605                 search_start = block_group->key.objectid;
5606
5607                 /*
5608                  * this can happen if we end up cycling through all the
5609                  * raid types, but we want to make sure we only allocate
5610                  * for the proper type.
5611                  */
5612                 if (!block_group_bits(block_group, data)) {
5613                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
5614                                     BTRFS_BLOCK_GROUP_RAID1 |
5615                                     BTRFS_BLOCK_GROUP_RAID10;
5616
5617                         /*
5618                          * if they asked for extra copies and this block group
5619                          * doesn't provide them, bail.  This does allow us to
5620                          * fill raid0 from raid1.
5621                          */
5622                         if ((data & extra) && !(block_group->flags & extra))
5623                                 goto loop;
5624                 }
5625
5626 have_block_group:
5627                 cached = block_group_cache_done(block_group);
5628                 if (unlikely(!cached)) {
5629                         found_uncached_bg = true;
5630                         ret = cache_block_group(block_group, trans,
5631                                                 orig_root, 0);
5632                         BUG_ON(ret < 0);
5633                         ret = 0;
5634                 }
5635
5636                 if (unlikely(block_group->ro))
5637                         goto loop;
5638
5639                 /*
5640                  * OK, we want to try to use the cluster allocator, so
5641                  * let's look there.
5642                  */
5643                 if (last_ptr) {
5644                         /*
5645                          * the refill lock keeps out other
5646                          * people trying to start a new cluster
5647                          */
5648                         spin_lock(&last_ptr->refill_lock);
5649                         used_block_group = last_ptr->block_group;
5650                         if (used_block_group != block_group &&
5651                             (!used_block_group ||
5652                              used_block_group->ro ||
5653                              !block_group_bits(used_block_group, data))) {
5654                                 used_block_group = block_group;
5655                                 goto refill_cluster;
5656                         }
5657
5658                         if (used_block_group != block_group)
5659                                 btrfs_get_block_group(used_block_group);
5660
5661                         offset = btrfs_alloc_from_cluster(used_block_group,
5662                           last_ptr, num_bytes, used_block_group->key.objectid);
5663                         if (offset) {
5664                                 /* we have a block, we're done */
5665                                 spin_unlock(&last_ptr->refill_lock);
5666                                 trace_btrfs_reserve_extent_cluster(root,
5667                                         block_group, search_start, num_bytes);
5668                                 goto checks;
5669                         }
5670
5671                         WARN_ON(last_ptr->block_group != used_block_group);
5672                         if (used_block_group != block_group) {
5673                                 btrfs_put_block_group(used_block_group);
5674                                 used_block_group = block_group;
5675                         }
5676 refill_cluster:
5677                         BUG_ON(used_block_group != block_group);
5678                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5679                          * set up a new cluster, so let's just skip it
5680                          * and let the allocator find whatever block
5681                          * it can find.  If we reach this point, we
5682                          * will have tried the cluster allocator
5683                          * plenty of times and not have found
5684                          * anything, so we are likely way too
5685                          * fragmented for the clustering stuff to find
5686                          * anything.
5687                          *
5688                          * However, if the cluster is taken from the
5689                          * current block group, release the cluster
5690                          * first, so that we stand a better chance of
5691                          * succeeding in the unclustered
5692                          * allocation.  */
5693                         if (loop >= LOOP_NO_EMPTY_SIZE &&
5694                             last_ptr->block_group != block_group) {
5695                                 spin_unlock(&last_ptr->refill_lock);
5696                                 goto unclustered_alloc;
5697                         }
5698
5699                         /*
5700                          * this cluster didn't work out, free it and
5701                          * start over
5702                          */
5703                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5704
5705                         if (loop >= LOOP_NO_EMPTY_SIZE) {
5706                                 spin_unlock(&last_ptr->refill_lock);
5707                                 goto unclustered_alloc;
5708                         }
5709
5710                         /* allocate a cluster in this block group */
5711                         ret = btrfs_find_space_cluster(trans, root,
5712                                                block_group, last_ptr,
5713                                                search_start, num_bytes,
5714                                                empty_cluster + empty_size);
5715                         if (ret == 0) {
5716                                 /*
5717                                  * now pull our allocation out of this
5718                                  * cluster
5719                                  */
5720                                 offset = btrfs_alloc_from_cluster(block_group,
5721                                                   last_ptr, num_bytes,
5722                                                   search_start);
5723                                 if (offset) {
5724                                         /* we found one, proceed */
5725                                         spin_unlock(&last_ptr->refill_lock);
5726                                         trace_btrfs_reserve_extent_cluster(root,
5727                                                 block_group, search_start,
5728                                                 num_bytes);
5729                                         goto checks;
5730                                 }
5731                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
5732                                    && !failed_cluster_refill) {
5733                                 spin_unlock(&last_ptr->refill_lock);
5734
5735                                 failed_cluster_refill = true;
5736                                 wait_block_group_cache_progress(block_group,
5737                                        num_bytes + empty_cluster + empty_size);
5738                                 goto have_block_group;
5739                         }
5740
5741                         /*
5742                          * at this point we either didn't find a cluster
5743                          * or we weren't able to allocate a block from our
5744                          * cluster.  Free the cluster we've been trying
5745                          * to use, and go to the next block group
5746                          */
5747                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5748                         spin_unlock(&last_ptr->refill_lock);
5749                         goto loop;
5750                 }
5751
5752 unclustered_alloc:
5753                 spin_lock(&block_group->free_space_ctl->tree_lock);
5754                 if (cached &&
5755                     block_group->free_space_ctl->free_space <
5756                     num_bytes + empty_cluster + empty_size) {
5757                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5758                         goto loop;
5759                 }
5760                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5761
5762                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5763                                                     num_bytes, empty_size);
5764                 /*
5765                  * If we didn't find a chunk, and we haven't failed on this
5766                  * block group before, and this block group is in the middle of
5767                  * caching and we are ok with waiting, then go ahead and wait
5768                  * for progress to be made, and set failed_alloc to true.
5769                  *
5770                  * If failed_alloc is true then we've already waited on this
5771                  * block group once and should move on to the next block group.
5772                  */
5773                 if (!offset && !failed_alloc && !cached &&
5774                     loop > LOOP_CACHING_NOWAIT) {
5775                         wait_block_group_cache_progress(block_group,
5776                                                 num_bytes + empty_size);
5777                         failed_alloc = true;
5778                         goto have_block_group;
5779                 } else if (!offset) {
5780                         if (!cached)
5781                                 have_caching_bg = true;
5782                         goto loop;
5783                 }
5784 checks:
5785                 search_start = stripe_align(root, offset);
5786
5787                 /* move on to the next group */
5788                 if (search_start + num_bytes >
5789                     used_block_group->key.objectid + used_block_group->key.offset) {
5790                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5791                         goto loop;
5792                 }
5793
5794                 if (offset < search_start)
5795                         btrfs_add_free_space(used_block_group, offset,
5796                                              search_start - offset);
5797                 BUG_ON(offset > search_start);
5798
5799                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5800                                                   alloc_type);
5801                 if (ret == -EAGAIN) {
5802                         btrfs_add_free_space(used_block_group, offset, num_bytes);
5803                         goto loop;
5804                 }
5805
5806                 /* we are all good, let's return */
5807                 ins->objectid = search_start;
5808                 ins->offset = num_bytes;
5809
5810                 trace_btrfs_reserve_extent(orig_root, block_group,
5811                                            search_start, num_bytes);
5816                 if (used_block_group != block_group)
5817                         btrfs_put_block_group(used_block_group);
5818                 btrfs_put_block_group(block_group);
5819                 break;
5820 loop:
5821                 failed_cluster_refill = false;
5822                 failed_alloc = false;
5823                 BUG_ON(index != get_block_group_index(block_group));
5824                 if (used_block_group != block_group)
5825                         btrfs_put_block_group(used_block_group);
5826                 btrfs_put_block_group(block_group);
5827         }
5828         up_read(&space_info->groups_sem);
5829
5830         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5831                 goto search;
5832
5833         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5834                 goto search;
5835
5836         /*
5837          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5838          *                      caching kthreads as we move along
5839          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5840          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5841          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5842          *                      again
5843          */
5844         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5845                 index = 0;
5846                 loop++;
5847                 if (loop == LOOP_ALLOC_CHUNK) {
5848                         if (allowed_chunk_alloc) {
5849                                 ret = do_chunk_alloc(trans, root, num_bytes +
5850                                                      2 * 1024 * 1024, data,
5851                                                      CHUNK_ALLOC_LIMITED);
5852                                 if (ret < 0) {
5853                                         btrfs_abort_transaction(trans,
5854                                                                 root, ret);
5855                                         goto out;
5856                                 }
5857                                 allowed_chunk_alloc = 0;
5858                                 if (ret == 1)
5859                                         done_chunk_alloc = 1;
5860                         } else if (!done_chunk_alloc &&
5861                                    space_info->force_alloc ==
5862                                    CHUNK_ALLOC_NO_FORCE) {
5863                                 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5864                         }
5865
5866                         /*
5867                          * We didn't allocate a chunk, go ahead and drop the
5868                          * empty size and loop again.
5869                          */
5870                         if (!done_chunk_alloc)
5871                                 loop = LOOP_NO_EMPTY_SIZE;
5872                 }
5873
5874                 if (loop == LOOP_NO_EMPTY_SIZE) {
5875                         empty_size = 0;
5876                         empty_cluster = 0;
5877                 }
5878
5879                 goto search;
5880         } else if (!ins->objectid) {
5881                 ret = -ENOSPC;
5882         } else if (ins->objectid) {
5883                 ret = 0;
5884         }
5885 out:
5886
5887         return ret;
5888 }
5889
5890 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5891                             int dump_block_groups)
5892 {
5893         struct btrfs_block_group_cache *cache;
5894         int index = 0;
5895
5896         spin_lock(&info->lock);
5897         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5898                (unsigned long long)info->flags,
5899                (unsigned long long)(info->total_bytes - info->bytes_used -
5900                                     info->bytes_pinned - info->bytes_reserved -
5901                                     info->bytes_readonly),
5902                (info->full) ? "" : "not ");
5903         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5904                "reserved=%llu, may_use=%llu, readonly=%llu\n",
5905                (unsigned long long)info->total_bytes,
5906                (unsigned long long)info->bytes_used,
5907                (unsigned long long)info->bytes_pinned,
5908                (unsigned long long)info->bytes_reserved,
5909                (unsigned long long)info->bytes_may_use,
5910                (unsigned long long)info->bytes_readonly);
5911         spin_unlock(&info->lock);
5912
5913         if (!dump_block_groups)
5914                 return;
5915
5916         down_read(&info->groups_sem);
5917 again:
5918         list_for_each_entry(cache, &info->block_groups[index], list) {
5919                 spin_lock(&cache->lock);
5920                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5921                        "%llu pinned %llu reserved\n",
5922                        (unsigned long long)cache->key.objectid,
5923                        (unsigned long long)cache->key.offset,
5924                        (unsigned long long)btrfs_block_group_used(&cache->item),
5925                        (unsigned long long)cache->pinned,
5926                        (unsigned long long)cache->reserved);
5927                 btrfs_dump_free_space(cache, bytes);
5928                 spin_unlock(&cache->lock);
5929         }
5930         if (++index < BTRFS_NR_RAID_TYPES)
5931                 goto again;
5932         up_read(&info->groups_sem);
5933 }
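/*
 * The dump above is wired to the -ENOSPC path in btrfs_reserve_extent()
 * below and only fires when the filesystem is mounted with enospc_debug.
 */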
5934
5935 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5936                          struct btrfs_root *root,
5937                          u64 num_bytes, u64 min_alloc_size,
5938                          u64 empty_size, u64 hint_byte,
5939                          struct btrfs_key *ins, u64 data)
5940 {
5941         bool final_tried = false;
5942         int ret;
5943
5944         data = btrfs_get_alloc_profile(root, data);
5945 again:
5946         /*
5947          * the only place that sets empty_size is btrfs_realloc_node, which
5948          * is not called recursively on allocations
5949          */
5950         if (empty_size || root->ref_cows) {
5951                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5952                                      num_bytes + 2 * 1024 * 1024, data,
5953                                      CHUNK_ALLOC_NO_FORCE);
5954                 if (ret < 0 && ret != -ENOSPC) {
5955                         btrfs_abort_transaction(trans, root, ret);
5956                         return ret;
5957                 }
5958         }
5959
5960         WARN_ON(num_bytes < root->sectorsize);
5961         ret = find_free_extent(trans, root, num_bytes, empty_size,
5962                                hint_byte, ins, data);
5963
5964         if (ret == -ENOSPC) {
5965                 if (!final_tried) {
5966                         num_bytes = num_bytes >> 1;
5967                         num_bytes = num_bytes & ~(root->sectorsize - 1);
5968                         num_bytes = max(num_bytes, min_alloc_size);
5969                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5970                                        num_bytes, data, CHUNK_ALLOC_FORCE);
5971                         if (ret < 0 && ret != -ENOSPC) {
5972                                 btrfs_abort_transaction(trans, root, ret);
5973                                 return ret;
5974                         }
5975                         if (num_bytes == min_alloc_size)
5976                                 final_tried = true;
5977                         goto again;
5978                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
5979                         struct btrfs_space_info *sinfo;
5980
5981                         sinfo = __find_space_info(root->fs_info, data);
5982                         printk(KERN_ERR "btrfs allocation failed flags %llu, "
5983                                "wanted %llu\n", (unsigned long long)data,
5984                                (unsigned long long)num_bytes);
5985                         if (sinfo)
5986                                 dump_space_info(sinfo, num_bytes, 1);
5987                 }
5988         }
5989
5990         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5991
5992         return ret;
5993 }
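/*
 * A minimal usage sketch (not part of this file, and compiled out):
 * reserve a data extent and hand it straight back.  "my_trans" and
 * "my_root" are placeholders for a live transaction handle and fs root;
 * error handling is reduced to the bare minimum.
 */
#if 0
static int example_reserve_extent(struct btrfs_trans_handle *my_trans,
                                  struct btrfs_root *my_root)
{
        struct btrfs_key ins;
        int ret;

        /* ask for 1MB, accept as little as 64KB, no placement hint */
        ret = btrfs_reserve_extent(my_trans, my_root, 1024 * 1024,
                                   64 * 1024, 0, 0, &ins,
                                   BTRFS_BLOCK_GROUP_DATA);
        if (ret)
                return ret;

        /* ins.objectid is the start bytenr, ins.offset the byte length */
        return btrfs_free_reserved_extent(my_root, ins.objectid,
                                          ins.offset);
}
#endif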
5994
5995 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
5996                                         u64 start, u64 len, int pin)
5997 {
5998         struct btrfs_block_group_cache *cache;
5999         int ret = 0;
6000
6001         cache = btrfs_lookup_block_group(root->fs_info, start);
6002         if (!cache) {
6003                 printk(KERN_ERR "Unable to find block group for %llu\n",
6004                        (unsigned long long)start);
6005                 return -ENOSPC;
6006         }
6007
6008         if (btrfs_test_opt(root, DISCARD))
6009                 ret = btrfs_discard_extent(root, start, len, NULL);
6010
6011         if (pin)
6012                 pin_down_extent(root, cache, start, len, 1);
6013         else {
6014                 btrfs_add_free_space(cache, start, len);
6015                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6016         }
6017         btrfs_put_block_group(cache);
6018
6019         trace_btrfs_reserved_extent_free(root, start, len);
6020
6021         return ret;
6022 }
6023
6024 int btrfs_free_reserved_extent(struct btrfs_root *root,
6025                                         u64 start, u64 len)
6026 {
6027         return __btrfs_free_reserved_extent(root, start, len, 0);
6028 }
6029
6030 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6031                                        u64 start, u64 len)
6032 {
6033         return __btrfs_free_reserved_extent(root, start, len, 1);
6034 }
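/*
 * The two wrappers above differ only in the pin argument:
 * btrfs_free_reserved_extent() returns the range to the free-space
 * cache immediately, while btrfs_free_and_pin_reserved_extent() pins
 * it, so the space cannot be reused until the pinned extents are
 * released at transaction commit.
 */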
6035
6036 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6037                                       struct btrfs_root *root,
6038                                       u64 parent, u64 root_objectid,
6039                                       u64 flags, u64 owner, u64 offset,
6040                                       struct btrfs_key *ins, int ref_mod)
6041 {
6042         int ret;
6043         struct btrfs_fs_info *fs_info = root->fs_info;
6044         struct btrfs_extent_item *extent_item;
6045         struct btrfs_extent_inline_ref *iref;
6046         struct btrfs_path *path;
6047         struct extent_buffer *leaf;
6048         int type;
6049         u32 size;
6050
6051         if (parent > 0)
6052                 type = BTRFS_SHARED_DATA_REF_KEY;
6053         else
6054                 type = BTRFS_EXTENT_DATA_REF_KEY;
6055
6056         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6057
6058         path = btrfs_alloc_path();
6059         if (!path)
6060                 return -ENOMEM;
6061
6062         path->leave_spinning = 1;
6063         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6064                                       ins, size);
6065         if (ret) {
6066                 btrfs_free_path(path);
6067                 return ret;
6068         }
6069
6070         leaf = path->nodes[0];
6071         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6072                                      struct btrfs_extent_item);
6073         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6074         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6075         btrfs_set_extent_flags(leaf, extent_item,
6076                                flags | BTRFS_EXTENT_FLAG_DATA);
6077
6078         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6079         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6080         if (parent > 0) {
6081                 struct btrfs_shared_data_ref *ref;
6082                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6083                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6084                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6085         } else {
6086                 struct btrfs_extent_data_ref *ref;
6087                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6088                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6089                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6090                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6091                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6092         }
6093
6094         btrfs_mark_buffer_dirty(path->nodes[0]);
6095         btrfs_free_path(path);
6096
6097         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6098         if (ret) { /* -ENOENT, logic error */
6099                 printk(KERN_ERR "btrfs update block group failed for %llu "
6100                        "%llu\n", (unsigned long long)ins->objectid,
6101                        (unsigned long long)ins->offset);
6102                 BUG();
6103         }
6104         return ret;
6105 }
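/*
 * Layout of the extent item created above, with the inline ref packed
 * directly behind the btrfs_extent_item:
 *
 *   parent > 0:  [ btrfs_extent_item ][ ref type ][ btrfs_shared_data_ref ]
 *   parent == 0: [ btrfs_extent_item ][ ref type ][ btrfs_extent_data_ref ]
 *
 * which is why size is sizeof(*extent_item) plus
 * btrfs_extent_inline_ref_size(type).
 */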
6106
6107 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6108                                      struct btrfs_root *root,
6109                                      u64 parent, u64 root_objectid,
6110                                      u64 flags, struct btrfs_disk_key *key,
6111                                      int level, struct btrfs_key *ins)
6112 {
6113         int ret;
6114         struct btrfs_fs_info *fs_info = root->fs_info;
6115         struct btrfs_extent_item *extent_item;
6116         struct btrfs_tree_block_info *block_info;
6117         struct btrfs_extent_inline_ref *iref;
6118         struct btrfs_path *path;
6119         struct extent_buffer *leaf;
6120         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
6121
6122         path = btrfs_alloc_path();
6123         if (!path)
6124                 return -ENOMEM;
6125
6126         path->leave_spinning = 1;
6127         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6128                                       ins, size);
6129         if (ret) {
6130                 btrfs_free_path(path);
6131                 return ret;
6132         }
6133
6134         leaf = path->nodes[0];
6135         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6136                                      struct btrfs_extent_item);
6137         btrfs_set_extent_refs(leaf, extent_item, 1);
6138         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6139         btrfs_set_extent_flags(leaf, extent_item,
6140                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6141         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6142
6143         btrfs_set_tree_block_key(leaf, block_info, key);
6144         btrfs_set_tree_block_level(leaf, block_info, level);
6145
6146         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6147         if (parent > 0) {
6148                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6149                 btrfs_set_extent_inline_ref_type(leaf, iref,
6150                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6151                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6152         } else {
6153                 btrfs_set_extent_inline_ref_type(leaf, iref,
6154                                                  BTRFS_TREE_BLOCK_REF_KEY);
6155                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6156         }
6157
6158         btrfs_mark_buffer_dirty(leaf);
6159         btrfs_free_path(path);
6160
6161         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6162         if (ret) { /* -ENOENT, logic error */
6163                 printk(KERN_ERR "btrfs update block group failed for %llu "
6164                        "%llu\n", (unsigned long long)ins->objectid,
6165                        (unsigned long long)ins->offset);
6166                 BUG();
6167         }
6168         return ret;
6169 }
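/*
 * Tree blocks get the same item layout as data extents above, but with
 * a btrfs_tree_block_info (key + level) squeezed in between:
 *
 *   [ btrfs_extent_item ][ btrfs_tree_block_info ][ inline ref ]
 */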
6170
6171 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6172                                      struct btrfs_root *root,
6173                                      u64 root_objectid, u64 owner,
6174                                      u64 offset, struct btrfs_key *ins)
6175 {
6176         int ret;
6177
6178         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6179
6180         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6181                                          ins->offset, 0,
6182                                          root_objectid, owner, offset,
6183                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6184         return ret;
6185 }
6186
6187 /*
6188  * this is used by the tree logging recovery code.  It records that
6189  * an extent has been allocated and makes sure to clear the free
6190  * space cache bits as well
6191  */
6192 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6193                                    struct btrfs_root *root,
6194                                    u64 root_objectid, u64 owner, u64 offset,
6195                                    struct btrfs_key *ins)
6196 {
6197         int ret;
6198         struct btrfs_block_group_cache *block_group;
6199         struct btrfs_caching_control *caching_ctl;
6200         u64 start = ins->objectid;
6201         u64 num_bytes = ins->offset;
6202
6203         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6204         cache_block_group(block_group, trans, NULL, 0);
6205         caching_ctl = get_caching_control(block_group);
6206
6207         if (!caching_ctl) {
6208                 BUG_ON(!block_group_cache_done(block_group));
6209                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6210                 BUG_ON(ret); /* -ENOMEM */
6211         } else {
6212                 mutex_lock(&caching_ctl->mutex);
6213
6214                 if (start >= caching_ctl->progress) {
6215                         ret = add_excluded_extent(root, start, num_bytes);
6216                         BUG_ON(ret); /* -ENOMEM */
6217                 } else if (start + num_bytes <= caching_ctl->progress) {
6218                         ret = btrfs_remove_free_space(block_group,
6219                                                       start, num_bytes);
6220                         BUG_ON(ret); /* -ENOMEM */
6221                 } else {
6222                         num_bytes = caching_ctl->progress - start;
6223                         ret = btrfs_remove_free_space(block_group,
6224                                                       start, num_bytes);
6225                         BUG_ON(ret); /* -ENOMEM */
6226
6227                         start = caching_ctl->progress;
6228                         num_bytes = ins->objectid + ins->offset -
6229                                     caching_ctl->progress;
6230                         ret = add_excluded_extent(root, start, num_bytes);
6231                         BUG_ON(ret); /* -ENOMEM */
6232                 }
6233
6234                 mutex_unlock(&caching_ctl->mutex);
6235                 put_caching_control(caching_ctl);
6236         }
6237
6238         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6239                                           RESERVE_ALLOC_NO_ACCOUNT);
6240         BUG_ON(ret); /* logic error */
6241         btrfs_put_block_group(block_group);
6242         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6243                                          0, owner, offset, ins, 1);
6244         return ret;
6245 }
6246
6247 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6248                                             struct btrfs_root *root,
6249                                             u64 bytenr, u32 blocksize,
6250                                             int level)
6251 {
6252         struct extent_buffer *buf;
6253
6254         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6255         if (!buf)
6256                 return ERR_PTR(-ENOMEM);
6257         btrfs_set_header_generation(buf, trans->transid);
6258         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6259         btrfs_tree_lock(buf);
6260         clean_tree_block(trans, root, buf);
6261         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6262
6263         btrfs_set_lock_blocking(buf);
6264         btrfs_set_buffer_uptodate(buf);
6265
6266         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6267                 /*
6268                  * we allow two log transactions at a time, so use different
6269                  * EXTENT bits to differentiate dirty pages.
6270                  */
6271                 if (root->log_transid % 2 == 0)
6272                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6273                                         buf->start + buf->len - 1, GFP_NOFS);
6274                 else
6275                         set_extent_new(&root->dirty_log_pages, buf->start,
6276                                         buf->start + buf->len - 1, GFP_NOFS);
6277         } else {
6278                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6279                          buf->start + buf->len - 1, GFP_NOFS);
6280         }
6281         trans->blocks_used++;
6282         /* this returns a buffer locked for blocking */
6283         return buf;
6284 }
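/*
 * Example of the log_transid parity above: a buffer created while
 * log_transid == 4 is tracked via set_extent_dirty(), one created while
 * log_transid == 5 via set_extent_new(), so the two log transactions
 * that can be in flight at once never mix up each other's pages.
 */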
6285
6286 static struct btrfs_block_rsv *
6287 use_block_rsv(struct btrfs_trans_handle *trans,
6288               struct btrfs_root *root, u32 blocksize)
6289 {
6290         struct btrfs_block_rsv *block_rsv;
6291         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6292         int ret;
6293
6294         block_rsv = get_block_rsv(trans, root);
6295
6296         if (block_rsv->size == 0) {
6297                 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6298                 /*
6299                  * If we couldn't reserve metadata bytes try and use some from
6300                  * the global reserve.
6301                  */
6302                 if (ret && block_rsv != global_rsv) {
6303                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6304                         if (!ret)
6305                                 return global_rsv;
6306                         return ERR_PTR(ret);
6307                 } else if (ret) {
6308                         return ERR_PTR(ret);
6309                 }
6310                 return block_rsv;
6311         }
6312
6313         ret = block_rsv_use_bytes(block_rsv, blocksize);
6314         if (!ret)
6315                 return block_rsv;
6316         if (ret) {
6317                 static DEFINE_RATELIMIT_STATE(_rs,
6318                                 DEFAULT_RATELIMIT_INTERVAL,
6319                                 /*DEFAULT_RATELIMIT_BURST*/ 2);
6320                 if (__ratelimit(&_rs)) {
6321                         printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
6322                         WARN_ON(1);
6323                 }
6324                 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6325                 if (!ret) {
6326                         return block_rsv;
6327                 } else if (ret && block_rsv != global_rsv) {
6328                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6329                         if (!ret)
6330                                 return global_rsv;
6331                 }
6332         }
6333
6334         return ERR_PTR(-ENOSPC);
6335 }
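/*
 * In short, use_block_rsv() tries three sources in order: bytes already
 * in the rsv handed back by get_block_rsv(), a fresh reservation via
 * reserve_metadata_bytes(), and finally bytes borrowed from the global
 * reserve.  Only when all of those fail does the caller see
 * ERR_PTR(-ENOSPC).
 */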
6336
6337 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6338                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6339 {
6340         block_rsv_add_bytes(block_rsv, blocksize, 0);
6341         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6342 }
6343
6344 /*
6345  * finds a free extent and does all the dirty work required for allocation.
6346  * It returns the key for the extent through ins, and a tree buffer for
6347  * the first block of the extent.
6348  *
6349  * returns the tree buffer or an ERR_PTR on failure.
6350  */
6351 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6352                                         struct btrfs_root *root, u32 blocksize,
6353                                         u64 parent, u64 root_objectid,
6354                                         struct btrfs_disk_key *key, int level,
6355                                         u64 hint, u64 empty_size)
6356 {
6357         struct btrfs_key ins;
6358         struct btrfs_block_rsv *block_rsv;
6359         struct extent_buffer *buf;
6360         u64 flags = 0;
6361         int ret;
6362
6364         block_rsv = use_block_rsv(trans, root, blocksize);
6365         if (IS_ERR(block_rsv))
6366                 return ERR_CAST(block_rsv);
6367
6368         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6369                                    empty_size, hint, &ins, 0);
6370         if (ret) {
6371                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6372                 return ERR_PTR(ret);
6373         }
6374
6375         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6376                                     blocksize, level);
6377         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6378
6379         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6380                 if (parent == 0)
6381                         parent = ins.objectid;
6382                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6383         } else
6384                 BUG_ON(parent > 0);
6385
6386         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6387                 struct btrfs_delayed_extent_op *extent_op;
6388                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6389                 BUG_ON(!extent_op); /* -ENOMEM */
6390                 if (key)
6391                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6392                 else
6393                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6394                 extent_op->flags_to_set = flags;
6395                 extent_op->update_key = 1;
6396                 extent_op->update_flags = 1;
6397                 extent_op->is_data = 0;
6398
6399                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6400                                         ins.objectid,
6401                                         ins.offset, parent, root_objectid,
6402                                         level, BTRFS_ADD_DELAYED_EXTENT,
6403                                         extent_op, 0);
6404                 BUG_ON(ret); /* -ENOMEM */
6405         }
6406         return buf;
6407 }
6408
6409 struct walk_control {
6410         u64 refs[BTRFS_MAX_LEVEL];
6411         u64 flags[BTRFS_MAX_LEVEL];
6412         struct btrfs_key update_progress;
6413         int stage;
6414         int level;
6415         int shared_level;
6416         int update_ref;
6417         int keep_locks;
6418         int reada_slot;
6419         int reada_count;
6420         int for_reloc;
6421 };
6422
6423 #define DROP_REFERENCE  1
6424 #define UPDATE_BACKREF  2
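/*
 * The walk below runs in one of these two stages: DROP_REFERENCE drops
 * refs on the way down; when do_walk_down() meets a shared subtree whose
 * back refs must be rewritten first, it switches the stage to
 * UPDATE_BACKREF for that subtree, and walk_up_proc() switches it back
 * once the subtree is done.
 */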
6425
6426 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6427                                      struct btrfs_root *root,
6428                                      struct walk_control *wc,
6429                                      struct btrfs_path *path)
6430 {
6431         u64 bytenr;
6432         u64 generation;
6433         u64 refs;
6434         u64 flags;
6435         u32 nritems;
6436         u32 blocksize;
6437         struct btrfs_key key;
6438         struct extent_buffer *eb;
6439         int ret;
6440         int slot;
6441         int nread = 0;
6442
6443         if (path->slots[wc->level] < wc->reada_slot) {
6444                 wc->reada_count = wc->reada_count * 2 / 3;
6445                 wc->reada_count = max(wc->reada_count, 2);
6446         } else {
6447                 wc->reada_count = wc->reada_count * 3 / 2;
6448                 wc->reada_count = min_t(int, wc->reada_count,
6449                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6450         }
6451
6452         eb = path->nodes[wc->level];
6453         nritems = btrfs_header_nritems(eb);
6454         blocksize = btrfs_level_size(root, wc->level - 1);
6455
6456         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6457                 if (nread >= wc->reada_count)
6458                         break;
6459
6460                 cond_resched();
6461                 bytenr = btrfs_node_blockptr(eb, slot);
6462                 generation = btrfs_node_ptr_generation(eb, slot);
6463
6464                 if (slot == path->slots[wc->level])
6465                         goto reada;
6466
6467                 if (wc->stage == UPDATE_BACKREF &&
6468                     generation <= root->root_key.offset)
6469                         continue;
6470
6471                 /* We don't lock the tree block, it's OK to be racy here */
6472                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6473                                                &refs, &flags);
6474                 /* We don't care about errors in readahead. */
6475                 if (ret < 0)
6476                         continue;
6477                 BUG_ON(refs == 0);
6478
6479                 if (wc->stage == DROP_REFERENCE) {
6480                         if (refs == 1)
6481                                 goto reada;
6482
6483                         if (wc->level == 1 &&
6484                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6485                                 continue;
6486                         if (!wc->update_ref ||
6487                             generation <= root->root_key.offset)
6488                                 continue;
6489                         btrfs_node_key_to_cpu(eb, &key, slot);
6490                         ret = btrfs_comp_cpu_keys(&key,
6491                                                   &wc->update_progress);
6492                         if (ret < 0)
6493                                 continue;
6494                 } else {
6495                         if (wc->level == 1 &&
6496                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6497                                 continue;
6498                 }
6499 reada:
6500                 ret = readahead_tree_block(root, bytenr, blocksize,
6501                                            generation);
6502                 if (ret)
6503                         break;
6504                 nread++;
6505         }
6506         wc->reada_slot = slot;
6507 }
6508
6509 /*
6510  * helper to process tree block while walking down the tree.
6511  *
6512  * when wc->stage == UPDATE_BACKREF, this function updates
6513  * back refs for pointers in the block.
6514  *
6515  * NOTE: return value 1 means we should stop walking down.
6516  */
6517 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6518                                    struct btrfs_root *root,
6519                                    struct btrfs_path *path,
6520                                    struct walk_control *wc, int lookup_info)
6521 {
6522         int level = wc->level;
6523         struct extent_buffer *eb = path->nodes[level];
6524         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6525         int ret;
6526
6527         if (wc->stage == UPDATE_BACKREF &&
6528             btrfs_header_owner(eb) != root->root_key.objectid)
6529                 return 1;
6530
6531         /*
6532          * when the reference count of a tree block is 1, it won't increase
6533          * again. once the full backref flag is set, we never clear it.
6534          */
6535         if (lookup_info &&
6536             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6537              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6538                 BUG_ON(!path->locks[level]);
6539                 ret = btrfs_lookup_extent_info(trans, root,
6540                                                eb->start, eb->len,
6541                                                &wc->refs[level],
6542                                                &wc->flags[level]);
6543                 BUG_ON(ret == -ENOMEM);
6544                 if (ret)
6545                         return ret;
6546                 BUG_ON(wc->refs[level] == 0);
6547         }
6548
6549         if (wc->stage == DROP_REFERENCE) {
6550                 if (wc->refs[level] > 1)
6551                         return 1;
6552
6553                 if (path->locks[level] && !wc->keep_locks) {
6554                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6555                         path->locks[level] = 0;
6556                 }
6557                 return 0;
6558         }
6559
6560         /* wc->stage == UPDATE_BACKREF */
6561         if (!(wc->flags[level] & flag)) {
6562                 BUG_ON(!path->locks[level]);
6563                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6564                 BUG_ON(ret); /* -ENOMEM */
6565                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6566                 BUG_ON(ret); /* -ENOMEM */
6567                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6568                                                   eb->len, flag, 0);
6569                 BUG_ON(ret); /* -ENOMEM */
6570                 wc->flags[level] |= flag;
6571         }
6572
6573         /*
6574          * the block is shared by multiple trees, so it's not good to
6575          * keep the tree lock
6576          */
6577         if (path->locks[level] && level > 0) {
6578                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6579                 path->locks[level] = 0;
6580         }
6581         return 0;
6582 }
6583
6584 /*
6585  * helper to process tree block pointer.
6586  *
6587  * when wc->stage == DROP_REFERENCE, this function checks
6588  * the reference count of the block pointed to. if the block
6589  * is shared and we need to update back refs for the subtree
6590  * rooted at the block, this function changes wc->stage to
6591  * UPDATE_BACKREF. if the block is shared and there is no
6592  * need to update back refs, this function drops the reference
6593  * to the block.
6594  *
6595  * NOTE: return value 1 means we should stop walking down.
6596  */
6597 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6598                                  struct btrfs_root *root,
6599                                  struct btrfs_path *path,
6600                                  struct walk_control *wc, int *lookup_info)
6601 {
6602         u64 bytenr;
6603         u64 generation;
6604         u64 parent;
6605         u32 blocksize;
6606         struct btrfs_key key;
6607         struct extent_buffer *next;
6608         int level = wc->level;
6609         int reada = 0;
6610         int ret = 0;
6611
6612         generation = btrfs_node_ptr_generation(path->nodes[level],
6613                                                path->slots[level]);
6614         /*
6615          * if the lower level block was created before the snapshot
6616          * was created, we know there is no need to update back refs
6617          * for the subtree
6618          */
6619         if (wc->stage == UPDATE_BACKREF &&
6620             generation <= root->root_key.offset) {
6621                 *lookup_info = 1;
6622                 return 1;
6623         }
6624
6625         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6626         blocksize = btrfs_level_size(root, level - 1);
6627
6628         next = btrfs_find_tree_block(root, bytenr, blocksize);
6629         if (!next) {
6630                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6631                 if (!next)
6632                         return -ENOMEM;
6633                 reada = 1;
6634         }
6635         btrfs_tree_lock(next);
6636         btrfs_set_lock_blocking(next);
6637
6638         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6639                                        &wc->refs[level - 1],
6640                                        &wc->flags[level - 1]);
6641         if (ret < 0) {
6642                 btrfs_tree_unlock(next);
6643                 return ret;
6644         }
6645
6646         BUG_ON(wc->refs[level - 1] == 0);
6647         *lookup_info = 0;
6648
6649         if (wc->stage == DROP_REFERENCE) {
6650                 if (wc->refs[level - 1] > 1) {
6651                         if (level == 1 &&
6652                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6653                                 goto skip;
6654
6655                         if (!wc->update_ref ||
6656                             generation <= root->root_key.offset)
6657                                 goto skip;
6658
6659                         btrfs_node_key_to_cpu(path->nodes[level], &key,
6660                                               path->slots[level]);
6661                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6662                         if (ret < 0)
6663                                 goto skip;
6664
6665                         wc->stage = UPDATE_BACKREF;
6666                         wc->shared_level = level - 1;
6667                 }
6668         } else {
6669                 if (level == 1 &&
6670                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6671                         goto skip;
6672         }
6673
6674         if (!btrfs_buffer_uptodate(next, generation, 0)) {
6675                 btrfs_tree_unlock(next);
6676                 free_extent_buffer(next);
6677                 next = NULL;
6678                 *lookup_info = 1;
6679         }
6680
6681         if (!next) {
6682                 if (reada && level == 1)
6683                         reada_walk_down(trans, root, wc, path);
6684                 next = read_tree_block(root, bytenr, blocksize, generation);
6685                 if (!next)
6686                         return -EIO;
6687                 btrfs_tree_lock(next);
6688                 btrfs_set_lock_blocking(next);
6689         }
6690
6691         level--;
6692         BUG_ON(level != btrfs_header_level(next));
6693         path->nodes[level] = next;
6694         path->slots[level] = 0;
6695         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6696         wc->level = level;
6697         if (wc->level == 1)
6698                 wc->reada_slot = 0;
6699         return 0;
6700 skip:
6701         wc->refs[level - 1] = 0;
6702         wc->flags[level - 1] = 0;
6703         if (wc->stage == DROP_REFERENCE) {
6704                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6705                         parent = path->nodes[level]->start;
6706                 } else {
6707                         BUG_ON(root->root_key.objectid !=
6708                                btrfs_header_owner(path->nodes[level]));
6709                         parent = 0;
6710                 }
6711
6712                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6713                                 root->root_key.objectid, level - 1, 0, 0);
6714                 BUG_ON(ret); /* -ENOMEM */
6715         }
6716         btrfs_tree_unlock(next);
6717         free_extent_buffer(next);
6718         *lookup_info = 1;
6719         return 1;
6720 }
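
/*
 * Editor's note (illustrative, not part of the original file): under
 * DROP_REFERENCE, a child with wc->refs > 1 is either handed to the
 * UPDATE_BACKREF stage (when update_ref is set and the block was
 * created after the snapshot), or short-circuited through the "skip"
 * label above, where btrfs_free_extent() drops one reference and the
 * walk never descends into the shared subtree.
 */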
6721
6722 /*
6723  * helper to process a tree block while walking up the tree.
6724  *
6725  * when wc->stage == DROP_REFERENCE, this function drops
6726  * reference count on the block.
6727  *
6728  * when wc->stage == UPDATE_BACKREF, this function changes
6729  * wc->stage back to DROP_REFERENCE if we changed wc->stage
6730  * to UPDATE_BACKREF previously while processing the block.
6731  *
6732  * NOTE: return value 1 means we should stop walking up.
6733  */
6734 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6735                                  struct btrfs_root *root,
6736                                  struct btrfs_path *path,
6737                                  struct walk_control *wc)
6738 {
6739         int ret;
6740         int level = wc->level;
6741         struct extent_buffer *eb = path->nodes[level];
6742         u64 parent = 0;
6743
6744         if (wc->stage == UPDATE_BACKREF) {
6745                 BUG_ON(wc->shared_level < level);
6746                 if (level < wc->shared_level)
6747                         goto out;
6748
6749                 ret = find_next_key(path, level + 1, &wc->update_progress);
6750                 if (ret > 0)
6751                         wc->update_ref = 0;
6752
6753                 wc->stage = DROP_REFERENCE;
6754                 wc->shared_level = -1;
6755                 path->slots[level] = 0;
6756
6757                 /*
6758                  * check the reference count again if the block isn't
6759                  * locked. we should start walking down the tree again if
6760                  * the reference count is one.
6761                  */
6762                 if (!path->locks[level]) {
6763                         BUG_ON(level == 0);
6764                         btrfs_tree_lock(eb);
6765                         btrfs_set_lock_blocking(eb);
6766                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6767
6768                         ret = btrfs_lookup_extent_info(trans, root,
6769                                                        eb->start, eb->len,
6770                                                        &wc->refs[level],
6771                                                        &wc->flags[level]);
6772                         if (ret < 0) {
6773                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6774                                 return ret;
6775                         }
6776                         BUG_ON(wc->refs[level] == 0);
6777                         if (wc->refs[level] == 1) {
6778                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6779                                 return 1;
6780                         }
6781                 }
6782         }
6783
6784         /* wc->stage == DROP_REFERENCE */
6785         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6786
6787         if (wc->refs[level] == 1) {
6788                 if (level == 0) {
6789                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6790                                 ret = btrfs_dec_ref(trans, root, eb, 1,
6791                                                     wc->for_reloc);
6792                         else
6793                                 ret = btrfs_dec_ref(trans, root, eb, 0,
6794                                                     wc->for_reloc);
6795                         BUG_ON(ret); /* -ENOMEM */
6796                 }
6797                 /* make block locked assertion in clean_tree_block happy */
6798                 if (!path->locks[level] &&
6799                     btrfs_header_generation(eb) == trans->transid) {
6800                         btrfs_tree_lock(eb);
6801                         btrfs_set_lock_blocking(eb);
6802                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6803                 }
6804                 clean_tree_block(trans, root, eb);
6805         }
6806
6807         if (eb == root->node) {
6808                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6809                         parent = eb->start;
6810                 else
6811                         BUG_ON(root->root_key.objectid !=
6812                                btrfs_header_owner(eb));
6813         } else {
6814                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6815                         parent = path->nodes[level + 1]->start;
6816                 else
6817                         BUG_ON(root->root_key.objectid !=
6818                                btrfs_header_owner(path->nodes[level + 1]));
6819         }
6820
6821         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6822 out:
6823         wc->refs[level] = 0;
6824         wc->flags[level] = 0;
6825         return 0;
6826 }
6827
6828 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6829                                    struct btrfs_root *root,
6830                                    struct btrfs_path *path,
6831                                    struct walk_control *wc)
6832 {
6833         int level = wc->level;
6834         int lookup_info = 1;
6835         int ret;
6836
6837         while (level >= 0) {
6838                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6839                 if (ret > 0)
6840                         break;
6841
6842                 if (level == 0)
6843                         break;
6844
6845                 if (path->slots[level] >=
6846                     btrfs_header_nritems(path->nodes[level]))
6847                         break;
6848
6849                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6850                 if (ret > 0) {
6851                         path->slots[level]++;
6852                         continue;
6853                 } else if (ret < 0)
6854                         return ret;
6855                 level = wc->level;
6856         }
6857         return 0;
6858 }
6859
6860 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6861                                  struct btrfs_root *root,
6862                                  struct btrfs_path *path,
6863                                  struct walk_control *wc, int max_level)
6864 {
6865         int level = wc->level;
6866         int ret;
6867
6868         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6869         while (level < max_level && path->nodes[level]) {
6870                 wc->level = level;
6871                 if (path->slots[level] + 1 <
6872                     btrfs_header_nritems(path->nodes[level])) {
6873                         path->slots[level]++;
6874                         return 0;
6875                 } else {
6876                         ret = walk_up_proc(trans, root, path, wc);
6877                         if (ret > 0)
6878                                 return 0;
6879
6880                         if (path->locks[level]) {
6881                                 btrfs_tree_unlock_rw(path->nodes[level],
6882                                                      path->locks[level]);
6883                                 path->locks[level] = 0;
6884                         }
6885                         free_extent_buffer(path->nodes[level]);
6886                         path->nodes[level] = NULL;
6887                         level++;
6888                 }
6889         }
6890         return 1;
6891 }
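
/*
 * Sketch (not in the original file): walk_down_tree and walk_up_tree
 * are driven as a pair.  The caller descends as far as possible, then
 * climbs until walk_up_tree either exposes a new slot to descend from
 * (returns 0) or finishes the whole tree (returns 1), roughly:
 *
 *	while (1) {
 *		ret = walk_down_tree(trans, root, path, wc);
 *		if (ret < 0)
 *			break;
 *		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
 *		if (ret != 0)
 *			break;
 *	}
 *
 * btrfs_drop_snapshot() below is the canonical user of this loop.
 */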
6892
6893 /*
6894  * drop a subvolume tree.
6895  *
6896  * this function traverses the tree freeing any blocks that are only
6897  * referenced by the tree.
6898  *
6899  * when a shared tree block is found, this function decreases its
6900  * reference count by one. if update_ref is true, this function
6901  * also makes sure backrefs for the shared block and all lower level
6902  * blocks are properly updated.
6903  */
6904 int btrfs_drop_snapshot(struct btrfs_root *root,
6905                          struct btrfs_block_rsv *block_rsv, int update_ref,
6906                          int for_reloc)
6907 {
6908         struct btrfs_path *path;
6909         struct btrfs_trans_handle *trans;
6910         struct btrfs_root *tree_root = root->fs_info->tree_root;
6911         struct btrfs_root_item *root_item = &root->root_item;
6912         struct walk_control *wc;
6913         struct btrfs_key key;
6914         int err = 0;
6915         int ret;
6916         int level;
6917
6918         path = btrfs_alloc_path();
6919         if (!path) {
6920                 err = -ENOMEM;
6921                 goto out;
6922         }
6923
6924         wc = kzalloc(sizeof(*wc), GFP_NOFS);
6925         if (!wc) {
6926                 btrfs_free_path(path);
6927                 err = -ENOMEM;
6928                 goto out;
6929         }
6930
6931         trans = btrfs_start_transaction(tree_root, 0);
6932         if (IS_ERR(trans)) {
6933                 err = PTR_ERR(trans);
6934                 goto out_free;
6935         }
6936
6937         if (block_rsv)
6938                 trans->block_rsv = block_rsv;
6939
6940         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6941                 level = btrfs_header_level(root->node);
6942                 path->nodes[level] = btrfs_lock_root_node(root);
6943                 btrfs_set_lock_blocking(path->nodes[level]);
6944                 path->slots[level] = 0;
6945                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6946                 memset(&wc->update_progress, 0,
6947                        sizeof(wc->update_progress));
6948         } else {
6949                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6950                 memcpy(&wc->update_progress, &key,
6951                        sizeof(wc->update_progress));
6952
6953                 level = root_item->drop_level;
6954                 BUG_ON(level == 0);
6955                 path->lowest_level = level;
6956                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6957                 path->lowest_level = 0;
6958                 if (ret < 0) {
6959                         err = ret;
6960                         goto out_end_trans;
6961                 }
6962                 WARN_ON(ret > 0);
6963
6964                 /*
6965                  * unlock our path; this is safe because only this
6966                  * function is allowed to delete this snapshot
6967                  */
6968                 btrfs_unlock_up_safe(path, 0);
6969
6970                 level = btrfs_header_level(root->node);
6971                 while (1) {
6972                         btrfs_tree_lock(path->nodes[level]);
6973                         btrfs_set_lock_blocking(path->nodes[level]);
6974
6975                         ret = btrfs_lookup_extent_info(trans, root,
6976                                                 path->nodes[level]->start,
6977                                                 path->nodes[level]->len,
6978                                                 &wc->refs[level],
6979                                                 &wc->flags[level]);
6980                         if (ret < 0) {
6981                                 err = ret;
6982                                 goto out_end_trans;
6983                         }
6984                         BUG_ON(wc->refs[level] == 0);
6985
6986                         if (level == root_item->drop_level)
6987                                 break;
6988
6989                         btrfs_tree_unlock(path->nodes[level]);
6990                         WARN_ON(wc->refs[level] != 1);
6991                         level--;
6992                 }
6993         }
6994
6995         wc->level = level;
6996         wc->shared_level = -1;
6997         wc->stage = DROP_REFERENCE;
6998         wc->update_ref = update_ref;
6999         wc->keep_locks = 0;
7000         wc->for_reloc = for_reloc;
7001         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7002
7003         while (1) {
7004                 ret = walk_down_tree(trans, root, path, wc);
7005                 if (ret < 0) {
7006                         err = ret;
7007                         break;
7008                 }
7009
7010                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7011                 if (ret < 0) {
7012                         err = ret;
7013                         break;
7014                 }
7015
7016                 if (ret > 0) {
7017                         BUG_ON(wc->stage != DROP_REFERENCE);
7018                         break;
7019                 }
7020
7021                 if (wc->stage == DROP_REFERENCE) {
7022                         level = wc->level;
7023                         btrfs_node_key(path->nodes[level],
7024                                        &root_item->drop_progress,
7025                                        path->slots[level]);
7026                         root_item->drop_level = level;
7027                 }
7028
7029                 BUG_ON(wc->level == 0);
7030                 if (btrfs_should_end_transaction(trans, tree_root)) {
7031                         ret = btrfs_update_root(trans, tree_root,
7032                                                 &root->root_key,
7033                                                 root_item);
7034                         if (ret) {
7035                                 btrfs_abort_transaction(trans, tree_root, ret);
7036                                 err = ret;
7037                                 goto out_end_trans;
7038                         }
7039
7040                         btrfs_end_transaction_throttle(trans, tree_root);
7041                         trans = btrfs_start_transaction(tree_root, 0);
7042                         if (IS_ERR(trans)) {
7043                                 err = PTR_ERR(trans);
7044                                 goto out_free;
7045                         }
7046                         if (block_rsv)
7047                                 trans->block_rsv = block_rsv;
7048                 }
7049         }
7050         btrfs_release_path(path);
7051         if (err)
7052                 goto out_end_trans;
7053
7054         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7055         if (ret) {
7056                 btrfs_abort_transaction(trans, tree_root, ret);
7057                 goto out_end_trans;
7058         }
7059
7060         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7061                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7062                                            NULL, NULL);
7063                 if (ret < 0) {
7064                         btrfs_abort_transaction(trans, tree_root, ret);
7065                         err = ret;
7066                         goto out_end_trans;
7067                 } else if (ret > 0) {
7068                         /* if we fail to delete the orphan item this time
7069                          * around, it'll get picked up the next time.
7070                          *
7071                          * The most common failure here is just -ENOENT.
7072                          */
7073                         btrfs_del_orphan_item(trans, tree_root,
7074                                               root->root_key.objectid);
7075                 }
7076         }
7077
7078         if (root->in_radix) {
7079                 btrfs_free_fs_root(tree_root->fs_info, root);
7080         } else {
7081                 free_extent_buffer(root->node);
7082                 free_extent_buffer(root->commit_root);
7083                 kfree(root);
7084         }
7085 out_end_trans:
7086         btrfs_end_transaction_throttle(trans, tree_root);
7087 out_free:
7088         kfree(wc);
7089         btrfs_free_path(path);
7090 out:
7091         if (err)
7092                 btrfs_std_error(root->fs_info, err);
7093         return err;
7094 }
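
/*
 * Usage sketch (an assumption, not taken from this file): the snapshot
 * cleaner path typically drops a dead subvolume with no private block
 * reservation, e.g.:
 *
 *	btrfs_drop_snapshot(root, NULL, 1, 0);
 *
 * update_ref=1 asks for backrefs of shared blocks to be fixed up while
 * the tree is torn down; for_reloc=0 marks this as an ordinary
 * (non-relocation) drop.
 */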
7095
7096 /*
7097  * drop subtree rooted at tree block 'node'.
7098  *
7099  * NOTE: this function will unlock and release tree block 'node'.
7100  * only used by relocation code.
7101  */
7102 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7103                         struct btrfs_root *root,
7104                         struct extent_buffer *node,
7105                         struct extent_buffer *parent)
7106 {
7107         struct btrfs_path *path;
7108         struct walk_control *wc;
7109         int level;
7110         int parent_level;
7111         int ret = 0;
7112         int wret;
7113
7114         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7115
7116         path = btrfs_alloc_path();
7117         if (!path)
7118                 return -ENOMEM;
7119
7120         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7121         if (!wc) {
7122                 btrfs_free_path(path);
7123                 return -ENOMEM;
7124         }
7125
7126         btrfs_assert_tree_locked(parent);
7127         parent_level = btrfs_header_level(parent);
7128         extent_buffer_get(parent);
7129         path->nodes[parent_level] = parent;
7130         path->slots[parent_level] = btrfs_header_nritems(parent);
7131
7132         btrfs_assert_tree_locked(node);
7133         level = btrfs_header_level(node);
7134         path->nodes[level] = node;
7135         path->slots[level] = 0;
7136         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7137
7138         wc->refs[parent_level] = 1;
7139         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7140         wc->level = level;
7141         wc->shared_level = -1;
7142         wc->stage = DROP_REFERENCE;
7143         wc->update_ref = 0;
7144         wc->keep_locks = 1;
7145         wc->for_reloc = 1;
7146         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7147
7148         while (1) {
7149                 wret = walk_down_tree(trans, root, path, wc);
7150                 if (wret < 0) {
7151                         ret = wret;
7152                         break;
7153                 }
7154
7155                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7156                 if (wret < 0)
7157                         ret = wret;
7158                 if (wret != 0)
7159                         break;
7160         }
7161
7162         kfree(wc);
7163         btrfs_free_path(path);
7164         return ret;
7165 }
7166
7167 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7168 {
7169         u64 num_devices;
7170         u64 stripped;
7171
7172         /*
7173          * if restripe for this chunk_type is on, pick the target
7174          * profile and return it; otherwise do the usual balance
7175          */
7176         stripped = get_restripe_target(root->fs_info, flags);
7177         if (stripped)
7178                 return extended_to_chunk(stripped);
7179
7180         /*
7181          * we add in the count of missing devices because we want
7182          * to make sure that any RAID levels on a degraded FS
7183          * continue to be honored.
7184          */
7185         num_devices = root->fs_info->fs_devices->rw_devices +
7186                 root->fs_info->fs_devices->missing_devices;
7187
7188         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7189                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7190
7191         if (num_devices == 1) {
7192                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7193                 stripped = flags & ~stripped;
7194
7195                 /* turn raid0 into single device chunks */
7196                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7197                         return stripped;
7198
7199                 /* turn mirroring into duplication */
7200                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7201                              BTRFS_BLOCK_GROUP_RAID10))
7202                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7203         } else {
7204                 /* they already had raid on here, just return */
7205                 if (flags & stripped)
7206                         return flags;
7207
7208                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7209                 stripped = flags & ~stripped;
7210
7211                 /* switch duplicated blocks with raid1 */
7212                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7213                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7214
7215                 /* this is drive concat, leave it alone */
7216         }
7217
7218         return flags;
7219 }
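
/*
 * Worked example (illustrative): with no restripe target set and a
 * single rw device,
 *
 *	update_block_group_flags(root, BTRFS_BLOCK_GROUP_METADATA |
 *				 BTRFS_BLOCK_GROUP_RAID1)
 *
 * returns BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DUP (mirroring
 * becomes duplication), while on a multi-device filesystem the reverse
 * conversion upgrades DUP chunks to RAID1.
 */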
7220
7221 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7222 {
7223         struct btrfs_space_info *sinfo = cache->space_info;
7224         u64 num_bytes;
7225         u64 min_allocable_bytes;
7226         int ret = -ENOSPC;
7227
7228
7229         /*
7230          * We need some metadata space and system metadata space for
7231          * allocating chunks in some corner cases, so unless forced to,
7232          * keep some space allocatable before setting a group readonly.
7233          */
7234         if ((sinfo->flags &
7235              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7236             !force)
7237                 min_allocable_bytes = 1 * 1024 * 1024;
7238         else
7239                 min_allocable_bytes = 0;
7240
7241         spin_lock(&sinfo->lock);
7242         spin_lock(&cache->lock);
7243
7244         if (cache->ro) {
7245                 ret = 0;
7246                 goto out;
7247         }
7248
7249         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7250                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7251
7252         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7253             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7254             min_allocable_bytes <= sinfo->total_bytes) {
7255                 sinfo->bytes_readonly += num_bytes;
7256                 cache->ro = 1;
7257                 ret = 0;
7258         }
7259 out:
7260         spin_unlock(&cache->lock);
7261         spin_unlock(&sinfo->lock);
7262         return ret;
7263 }
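
/*
 * Worked example (illustrative): take a metadata space_info with
 * total_bytes = 8GiB of which 6GiB is used/reserved/pinned/readonly,
 * and a block group with 256MiB of unused space.  Without force,
 * 6GiB + 256MiB + 1MiB <= 8GiB holds, so bytes_readonly grows by
 * 256MiB and cache->ro is set.
 */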
7264
7265 int btrfs_set_block_group_ro(struct btrfs_root *root,
7266                              struct btrfs_block_group_cache *cache)
7267
7268 {
7269         struct btrfs_trans_handle *trans;
7270         u64 alloc_flags;
7271         int ret;
7272
7273         BUG_ON(cache->ro);
7274
7275         trans = btrfs_join_transaction(root);
7276         if (IS_ERR(trans))
7277                 return PTR_ERR(trans);
7278
7279         alloc_flags = update_block_group_flags(root, cache->flags);
7280         if (alloc_flags != cache->flags) {
7281                 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7282                                      CHUNK_ALLOC_FORCE);
7283                 if (ret < 0)
7284                         goto out;
7285         }
7286
7287         ret = set_block_group_ro(cache, 0);
7288         if (!ret)
7289                 goto out;
7290         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7291         ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7292                              CHUNK_ALLOC_FORCE);
7293         if (ret < 0)
7294                 goto out;
7295         ret = set_block_group_ro(cache, 0);
7296 out:
7297         btrfs_end_transaction(trans, root);
7298         return ret;
7299 }
7300
7301 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7302                             struct btrfs_root *root, u64 type)
7303 {
7304         u64 alloc_flags = get_alloc_profile(root, type);
7305         return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7306                               CHUNK_ALLOC_FORCE);
7307 }
7308
7309 /*
7310  * helper to account the unused space of all the readonly block groups in the
7311  * list. takes mirrors into account.
7312  */
7313 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7314 {
7315         struct btrfs_block_group_cache *block_group;
7316         u64 free_bytes = 0;
7317         int factor;
7318
7319         list_for_each_entry(block_group, groups_list, list) {
7320                 spin_lock(&block_group->lock);
7321
7322                 if (!block_group->ro) {
7323                         spin_unlock(&block_group->lock);
7324                         continue;
7325                 }
7326
7327                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7328                                           BTRFS_BLOCK_GROUP_RAID10 |
7329                                           BTRFS_BLOCK_GROUP_DUP))
7330                         factor = 2;
7331                 else
7332                         factor = 1;
7333
7334                 free_bytes += (block_group->key.offset -
7335                                btrfs_block_group_used(&block_group->item)) *
7336                                factor;
7337
7338                 spin_unlock(&block_group->lock);
7339         }
7340
7341         return free_bytes;
7342 }
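
/*
 * Worked example (illustrative): a readonly RAID1 block group with
 * key.offset = 1GiB and 384MiB recorded as used contributes
 * (1GiB - 384MiB) * 2 = 1280MiB of raw free space, since every logical
 * byte occupies two copies on disk; a single or raid0 group of the
 * same size would contribute only 640MiB.
 */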
7343
7344 /*
7345  * helper to account the unused space of all the readonly block groups in the
7346  * space_info. takes mirrors into account.
7347  */
7348 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7349 {
7350         int i;
7351         u64 free_bytes = 0;
7352
7353         spin_lock(&sinfo->lock);
7354
7355         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7356                 if (!list_empty(&sinfo->block_groups[i]))
7357                         free_bytes += __btrfs_get_ro_block_group_free_space(
7358                                                 &sinfo->block_groups[i]);
7359
7360         spin_unlock(&sinfo->lock);
7361
7362         return free_bytes;
7363 }
7364
7365 void btrfs_set_block_group_rw(struct btrfs_root *root,
7366                               struct btrfs_block_group_cache *cache)
7367 {
7368         struct btrfs_space_info *sinfo = cache->space_info;
7369         u64 num_bytes;
7370
7371         BUG_ON(!cache->ro);
7372
7373         spin_lock(&sinfo->lock);
7374         spin_lock(&cache->lock);
7375         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7376                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7377         sinfo->bytes_readonly -= num_bytes;
7378         cache->ro = 0;
7379         spin_unlock(&cache->lock);
7380         spin_unlock(&sinfo->lock);
7381 }
7382
7383 /*
7384  * checks to see if it's even possible to relocate this block group.
7385  *
7386  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7387  * ok to go ahead and try.
7388  */
7389 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7390 {
7391         struct btrfs_block_group_cache *block_group;
7392         struct btrfs_space_info *space_info;
7393         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7394         struct btrfs_device *device;
7395         u64 min_free;
7396         u64 dev_min = 1;
7397         u64 dev_nr = 0;
7398         u64 target;
7399         int index;
7400         int full = 0;
7401         int ret = 0;
7402
7403         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7404
7405         /* odd, couldn't find the block group, leave it alone */
7406         if (!block_group)
7407                 return -1;
7408
7409         min_free = btrfs_block_group_used(&block_group->item);
7410
7411         /* no bytes used, we're good */
7412         if (!min_free)
7413                 goto out;
7414
7415         space_info = block_group->space_info;
7416         spin_lock(&space_info->lock);
7417
7418         full = space_info->full;
7419
7420         /*
7421          * if this is the last block group we have in this space, we can't
7422          * relocate it unless we're able to allocate a new chunk below.
7423          *
7424          * Otherwise, we need to make sure we have room in the space to handle
7425          * all of the extents from this block group. If we can, we're good.
7426          */
7427         if ((space_info->total_bytes != block_group->key.offset) &&
7428             (space_info->bytes_used + space_info->bytes_reserved +
7429              space_info->bytes_pinned + space_info->bytes_readonly +
7430              min_free < space_info->total_bytes)) {
7431                 spin_unlock(&space_info->lock);
7432                 goto out;
7433         }
7434         spin_unlock(&space_info->lock);
7435
7436         /*
7437          * ok we don't have enough space, but maybe we have free space on our
7438          * devices to allocate new chunks for relocation, so loop through our
7439          * alloc devices and guess if we have enough space.  if this block
7440          * group is going to be restriped, run checks against the target
7441          * profile instead of the current one.
7442          */
7443         ret = -1;
7444
7445         /*
7446          * index:
7447          *      0: raid10
7448          *      1: raid1
7449          *      2: dup
7450          *      3: raid0
7451          *      4: single
7452          */
7453         target = get_restripe_target(root->fs_info, block_group->flags);
7454         if (target) {
7455                 index = __get_block_group_index(extended_to_chunk(target));
7456         } else {
7457                 /*
7458                  * this is just a balance, so if we were marked as full
7459                  * we know there is no space for a new chunk
7460                  */
7461                 if (full)
7462                         goto out;
7463
7464                 index = get_block_group_index(block_group);
7465         }
7466
7467         if (index == 0) {
7468                 dev_min = 4;
7469                 /* Divide by 2 */
7470                 min_free >>= 1;
7471         } else if (index == 1) {
7472                 dev_min = 2;
7473         } else if (index == 2) {
7474                 /* Multiply by 2 */
7475                 min_free <<= 1;
7476         } else if (index == 3) {
7477                 dev_min = fs_devices->rw_devices;
7478                 do_div(min_free, dev_min);
7479         }
7480
7481         mutex_lock(&root->fs_info->chunk_mutex);
7482         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7483                 u64 dev_offset;
7484
7485                 /*
7486                  * check to make sure we can actually find a chunk with enough
7487                  * space to fit our block group in.
7488                  */
7489                 if (device->total_bytes > device->bytes_used + min_free) {
7490                         ret = find_free_dev_extent(device, min_free,
7491                                                    &dev_offset, NULL);
7492                         if (!ret)
7493                                 dev_nr++;
7494
7495                         if (dev_nr >= dev_min)
7496                                 break;
7497
7498                         ret = -1;
7499                 }
7500         }
7501         mutex_unlock(&root->fs_info->chunk_mutex);
7502 out:
7503         btrfs_put_block_group(block_group);
7504         return ret;
7505 }
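
/*
 * Worked example (illustrative): relocating a raid10 group (index 0)
 * with 2GiB used requires at least dev_min = 4 rw devices, each with
 * room for min_free = 1GiB (half the used bytes, as raid10 stripes the
 * mirrored data across the devices).  For dup (index 2), one device
 * must instead fit 4GiB, twice the used bytes, because both copies
 * land on the same device.
 */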
7506
7507 static int find_first_block_group(struct btrfs_root *root,
7508                 struct btrfs_path *path, struct btrfs_key *key)
7509 {
7510         int ret = 0;
7511         struct btrfs_key found_key;
7512         struct extent_buffer *leaf;
7513         int slot;
7514
7515         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7516         if (ret < 0)
7517                 goto out;
7518
7519         while (1) {
7520                 slot = path->slots[0];
7521                 leaf = path->nodes[0];
7522                 if (slot >= btrfs_header_nritems(leaf)) {
7523                         ret = btrfs_next_leaf(root, path);
7524                         if (ret == 0)
7525                                 continue;
7526                         if (ret < 0)
7527                                 goto out;
7528                         break;
7529                 }
7530                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7531
7532                 if (found_key.objectid >= key->objectid &&
7533                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7534                         ret = 0;
7535                         goto out;
7536                 }
7537                 path->slots[0]++;
7538         }
7539 out:
7540         return ret;
7541 }
7542
7543 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7544 {
7545         struct btrfs_block_group_cache *block_group;
7546         u64 last = 0;
7547
7548         while (1) {
7549                 struct inode *inode;
7550
7551                 block_group = btrfs_lookup_first_block_group(info, last);
7552                 while (block_group) {
7553                         spin_lock(&block_group->lock);
7554                         if (block_group->iref)
7555                                 break;
7556                         spin_unlock(&block_group->lock);
7557                         block_group = next_block_group(info->tree_root,
7558                                                        block_group);
7559                 }
7560                 if (!block_group) {
7561                         if (last == 0)
7562                                 break;
7563                         last = 0;
7564                         continue;
7565                 }
7566
7567                 inode = block_group->inode;
7568                 block_group->iref = 0;
7569                 block_group->inode = NULL;
7570                 spin_unlock(&block_group->lock);
7571                 iput(inode);
7572                 last = block_group->key.objectid + block_group->key.offset;
7573                 btrfs_put_block_group(block_group);
7574         }
7575 }
7576
7577 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7578 {
7579         struct btrfs_block_group_cache *block_group;
7580         struct btrfs_space_info *space_info;
7581         struct btrfs_caching_control *caching_ctl;
7582         struct rb_node *n;
7583
7584         down_write(&info->extent_commit_sem);
7585         while (!list_empty(&info->caching_block_groups)) {
7586                 caching_ctl = list_entry(info->caching_block_groups.next,
7587                                          struct btrfs_caching_control, list);
7588                 list_del(&caching_ctl->list);
7589                 put_caching_control(caching_ctl);
7590         }
7591         up_write(&info->extent_commit_sem);
7592
7593         spin_lock(&info->block_group_cache_lock);
7594         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7595                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7596                                        cache_node);
7597                 rb_erase(&block_group->cache_node,
7598                          &info->block_group_cache_tree);
7599                 spin_unlock(&info->block_group_cache_lock);
7600
7601                 down_write(&block_group->space_info->groups_sem);
7602                 list_del(&block_group->list);
7603                 up_write(&block_group->space_info->groups_sem);
7604
7605                 if (block_group->cached == BTRFS_CACHE_STARTED)
7606                         wait_block_group_cache_done(block_group);
7607
7608                 /*
7609                  * We haven't cached this block group, which means we could
7610                  * possibly have excluded extents on this block group.
7611                  */
7612                 if (block_group->cached == BTRFS_CACHE_NO)
7613                         free_excluded_extents(info->extent_root, block_group);
7614
7615                 btrfs_remove_free_space_cache(block_group);
7616                 btrfs_put_block_group(block_group);
7617
7618                 spin_lock(&info->block_group_cache_lock);
7619         }
7620         spin_unlock(&info->block_group_cache_lock);
7621
7622         /* now that all the block groups are freed, go through and
7623          * free all the space_info structs.  This is only called during
7624          * the final stages of unmount, and so we know nobody is
7625          * using them.  We call synchronize_rcu() once before we start,
7626          * just to be on the safe side.
7627          */
7628         synchronize_rcu();
7629
7630         release_global_block_rsv(info);
7631
7632         while (!list_empty(&info->space_info)) {
7633                 space_info = list_entry(info->space_info.next,
7634                                         struct btrfs_space_info,
7635                                         list);
7636                 if (space_info->bytes_pinned > 0 ||
7637                     space_info->bytes_reserved > 0 ||
7638                     space_info->bytes_may_use > 0) {
7639                         WARN_ON(1);
7640                         dump_space_info(space_info, 0, 0);
7641                 }
7642                 list_del(&space_info->list);
7643                 kfree(space_info);
7644         }
7645         return 0;
7646 }
7647
7648 static void __link_block_group(struct btrfs_space_info *space_info,
7649                                struct btrfs_block_group_cache *cache)
7650 {
7651         int index = get_block_group_index(cache);
7652
7653         down_write(&space_info->groups_sem);
7654         list_add_tail(&cache->list, &space_info->block_groups[index]);
7655         up_write(&space_info->groups_sem);
7656 }
7657
7658 int btrfs_read_block_groups(struct btrfs_root *root)
7659 {
7660         struct btrfs_path *path;
7661         int ret;
7662         struct btrfs_block_group_cache *cache;
7663         struct btrfs_fs_info *info = root->fs_info;
7664         struct btrfs_space_info *space_info;
7665         struct btrfs_key key;
7666         struct btrfs_key found_key;
7667         struct extent_buffer *leaf;
7668         int need_clear = 0;
7669         u64 cache_gen;
7670
7671         root = info->extent_root;
7672         key.objectid = 0;
7673         key.offset = 0;
7674         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7675         path = btrfs_alloc_path();
7676         if (!path)
7677                 return -ENOMEM;
7678         path->reada = 1;
7679
7680         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7681         if (btrfs_test_opt(root, SPACE_CACHE) &&
7682             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7683                 need_clear = 1;
7684         if (btrfs_test_opt(root, CLEAR_CACHE))
7685                 need_clear = 1;
7686
7687         while (1) {
7688                 ret = find_first_block_group(root, path, &key);
7689                 if (ret > 0)
7690                         break;
7691                 if (ret != 0)
7692                         goto error;
7693                 leaf = path->nodes[0];
7694                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7695                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7696                 if (!cache) {
7697                         ret = -ENOMEM;
7698                         goto error;
7699                 }
7700                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7701                                                 GFP_NOFS);
7702                 if (!cache->free_space_ctl) {
7703                         kfree(cache);
7704                         ret = -ENOMEM;
7705                         goto error;
7706                 }
7707
7708                 atomic_set(&cache->count, 1);
7709                 spin_lock_init(&cache->lock);
7710                 cache->fs_info = info;
7711                 INIT_LIST_HEAD(&cache->list);
7712                 INIT_LIST_HEAD(&cache->cluster_list);
7713
7714                 if (need_clear)
7715                         cache->disk_cache_state = BTRFS_DC_CLEAR;
7716
7717                 read_extent_buffer(leaf, &cache->item,
7718                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7719                                    sizeof(cache->item));
7720                 memcpy(&cache->key, &found_key, sizeof(found_key));
7721
7722                 key.objectid = found_key.objectid + found_key.offset;
7723                 btrfs_release_path(path);
7724                 cache->flags = btrfs_block_group_flags(&cache->item);
7725                 cache->sectorsize = root->sectorsize;
7726
7727                 btrfs_init_free_space_ctl(cache);
7728
7729                 /*
7730                  * We need to exclude the super stripes now so that the space
7731                  * info has super bytes accounted for, otherwise we'll think
7732                  * we have more space than we actually do.
7733                  */
7734                 exclude_super_stripes(root, cache);
7735
7736                 /*
7737                  * check for two cases, either we are full, and therefore
7738                  * don't need to bother with the caching work since we won't
7739                  * find any space, or we are empty, and we can just add all
7740                  * the space in and be done with it.  This saves us a lot of
7741                  * time, particularly in the full case.
7742                  */
7743                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7744                         cache->last_byte_to_unpin = (u64)-1;
7745                         cache->cached = BTRFS_CACHE_FINISHED;
7746                         free_excluded_extents(root, cache);
7747                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7748                         cache->last_byte_to_unpin = (u64)-1;
7749                         cache->cached = BTRFS_CACHE_FINISHED;
7750                         add_new_free_space(cache, root->fs_info,
7751                                            found_key.objectid,
7752                                            found_key.objectid +
7753                                            found_key.offset);
7754                         free_excluded_extents(root, cache);
7755                 }
7756
7757                 ret = update_space_info(info, cache->flags, found_key.offset,
7758                                         btrfs_block_group_used(&cache->item),
7759                                         &space_info);
7760                 BUG_ON(ret); /* -ENOMEM */
7761                 cache->space_info = space_info;
7762                 spin_lock(&cache->space_info->lock);
7763                 cache->space_info->bytes_readonly += cache->bytes_super;
7764                 spin_unlock(&cache->space_info->lock);
7765
7766                 __link_block_group(space_info, cache);
7767
7768                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7769                 BUG_ON(ret); /* Logic error */
7770
7771                 set_avail_alloc_bits(root->fs_info, cache->flags);
7772                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7773                         set_block_group_ro(cache, 1);
7774         }
7775
7776         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7777                 if (!(get_alloc_profile(root, space_info->flags) &
7778                       (BTRFS_BLOCK_GROUP_RAID10 |
7779                        BTRFS_BLOCK_GROUP_RAID1 |
7780                        BTRFS_BLOCK_GROUP_DUP)))
7781                         continue;
7782                 /*
7783                  * avoid allocating from un-mirrored block group if there are
7784                  * mirrored block groups.
7785                  */
7786                 list_for_each_entry(cache, &space_info->block_groups[3], list)
7787                         set_block_group_ro(cache, 1);
7788                 list_for_each_entry(cache, &space_info->block_groups[4], list)
7789                         set_block_group_ro(cache, 1);
7790         }
7791
7792         init_global_block_rsv(info);
7793         ret = 0;
7794 error:
7795         btrfs_free_path(path);
7796         return ret;
7797 }
7798
7799 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7800                            struct btrfs_root *root, u64 bytes_used,
7801                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7802                            u64 size)
7803 {
7804         int ret;
7805         struct btrfs_root *extent_root;
7806         struct btrfs_block_group_cache *cache;
7807
7808         extent_root = root->fs_info->extent_root;
7809
7810         root->fs_info->last_trans_log_full_commit = trans->transid;
7811
7812         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7813         if (!cache)
7814                 return -ENOMEM;
7815         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7816                                         GFP_NOFS);
7817         if (!cache->free_space_ctl) {
7818                 kfree(cache);
7819                 return -ENOMEM;
7820         }
7821
7822         cache->key.objectid = chunk_offset;
7823         cache->key.offset = size;
7824         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7825         cache->sectorsize = root->sectorsize;
7826         cache->fs_info = root->fs_info;
7827
7828         atomic_set(&cache->count, 1);
7829         spin_lock_init(&cache->lock);
7830         INIT_LIST_HEAD(&cache->list);
7831         INIT_LIST_HEAD(&cache->cluster_list);
7832
7833         btrfs_init_free_space_ctl(cache);
7834
7835         btrfs_set_block_group_used(&cache->item, bytes_used);
7836         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7837         cache->flags = type;
7838         btrfs_set_block_group_flags(&cache->item, type);
7839
7840         cache->last_byte_to_unpin = (u64)-1;
7841         cache->cached = BTRFS_CACHE_FINISHED;
7842         exclude_super_stripes(root, cache);
7843
7844         add_new_free_space(cache, root->fs_info, chunk_offset,
7845                            chunk_offset + size);
7846
7847         free_excluded_extents(root, cache);
7848
7849         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7850                                 &cache->space_info);
7851         BUG_ON(ret); /* -ENOMEM */
7852         update_global_block_rsv(root->fs_info);
7853
7854         spin_lock(&cache->space_info->lock);
7855         cache->space_info->bytes_readonly += cache->bytes_super;
7856         spin_unlock(&cache->space_info->lock);
7857
7858         __link_block_group(cache->space_info, cache);
7859
7860         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7861         BUG_ON(ret); /* Logic error */
7862
7863         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7864                                 sizeof(cache->item));
7865         if (ret) {
7866                 btrfs_abort_transaction(trans, extent_root, ret);
7867                 return ret;
7868         }
7869
7870         set_avail_alloc_bits(extent_root->fs_info, type);
7871
7872         return 0;
7873 }
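
/*
 * Usage sketch (an assumption, based on the chunk allocation path): a
 * freshly allocated chunk is registered as a block group roughly like
 *
 *	btrfs_make_block_group(trans, extent_root, 0, type,
 *			       BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *			       chunk_offset, chunk_size);
 *
 * i.e. the new group starts with bytes_used == 0 and is keyed by the
 * logical start and size of the chunk it describes.
 */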
7874
7875 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7876 {
7877         u64 extra_flags = chunk_to_extended(flags) &
7878                                 BTRFS_EXTENDED_PROFILE_MASK;
7879
7880         if (flags & BTRFS_BLOCK_GROUP_DATA)
7881                 fs_info->avail_data_alloc_bits &= ~extra_flags;
7882         if (flags & BTRFS_BLOCK_GROUP_METADATA)
7883                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7884         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7885                 fs_info->avail_system_alloc_bits &= ~extra_flags;
7886 }
7887
7888 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7889                              struct btrfs_root *root, u64 group_start)
7890 {
7891         struct btrfs_path *path;
7892         struct btrfs_block_group_cache *block_group;
7893         struct btrfs_free_cluster *cluster;
7894         struct btrfs_root *tree_root = root->fs_info->tree_root;
7895         struct btrfs_key key;
7896         struct inode *inode;
7897         int ret;
7898         int index;
7899         int factor;
7900
7901         root = root->fs_info->extent_root;
7902
7903         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7904         BUG_ON(!block_group);
7905         BUG_ON(!block_group->ro);
7906
7907         /*
7908          * Free the reserved super bytes from this block group before
7909          * removing it.
7910          */
7911         free_excluded_extents(root, block_group);
7912
7913         memcpy(&key, &block_group->key, sizeof(key));
7914         index = get_block_group_index(block_group);
7915         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7916                                   BTRFS_BLOCK_GROUP_RAID1 |
7917                                   BTRFS_BLOCK_GROUP_RAID10))
7918                 factor = 2;
7919         else
7920                 factor = 1;
7921
7922         /* make sure this block group isn't part of an allocation cluster */
7923         cluster = &root->fs_info->data_alloc_cluster;
7924         spin_lock(&cluster->refill_lock);
7925         btrfs_return_cluster_to_free_space(block_group, cluster);
7926         spin_unlock(&cluster->refill_lock);
7927
7928         /*
7929          * make sure this block group isn't part of a metadata
7930          * allocation cluster
7931          */
7932         cluster = &root->fs_info->meta_alloc_cluster;
7933         spin_lock(&cluster->refill_lock);
7934         btrfs_return_cluster_to_free_space(block_group, cluster);
7935         spin_unlock(&cluster->refill_lock);
7936
7937         path = btrfs_alloc_path();
7938         if (!path) {
7939                 ret = -ENOMEM;
7940                 goto out;
7941         }
7942
7943         inode = lookup_free_space_inode(tree_root, block_group, path);
7944         if (!IS_ERR(inode)) {
7945                 ret = btrfs_orphan_add(trans, inode);
7946                 if (ret) {
7947                         btrfs_add_delayed_iput(inode);
7948                         goto out;
7949                 }
7950                 clear_nlink(inode);
7951                 /* One for the block group's ref */
7952                 spin_lock(&block_group->lock);
7953                 if (block_group->iref) {
7954                         block_group->iref = 0;
7955                         block_group->inode = NULL;
7956                         spin_unlock(&block_group->lock);
7957                         iput(inode);
7958                 } else {
7959                         spin_unlock(&block_group->lock);
7960                 }
7961                 /* One for our lookup ref */
7962                 btrfs_add_delayed_iput(inode);
7963         }
7964
7965         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7966         key.offset = block_group->key.objectid;
7967         key.type = 0;
7968
7969         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7970         if (ret < 0)
7971                 goto out;
7972         if (ret > 0)
7973                 btrfs_release_path(path);
7974         if (ret == 0) {
7975                 ret = btrfs_del_item(trans, tree_root, path);
7976                 if (ret)
7977                         goto out;
7978                 btrfs_release_path(path);
7979         }
7980
7981         spin_lock(&root->fs_info->block_group_cache_lock);
7982         rb_erase(&block_group->cache_node,
7983                  &root->fs_info->block_group_cache_tree);
7984         spin_unlock(&root->fs_info->block_group_cache_lock);
7985
7986         down_write(&block_group->space_info->groups_sem);
7987         /*
7988          * we must use list_del_init so people can check to see if they
7989          * are still on the list after taking the semaphore
7990          */
7991         list_del_init(&block_group->list);
7992         if (list_empty(&block_group->space_info->block_groups[index]))
7993                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
7994         up_write(&block_group->space_info->groups_sem);
7995
7996         if (block_group->cached == BTRFS_CACHE_STARTED)
7997                 wait_block_group_cache_done(block_group);
7998
7999         btrfs_remove_free_space_cache(block_group);
8000
8001         spin_lock(&block_group->space_info->lock);
8002         block_group->space_info->total_bytes -= block_group->key.offset;
8003         block_group->space_info->bytes_readonly -= block_group->key.offset;
8004         block_group->space_info->disk_total -= block_group->key.offset * factor;
8005         spin_unlock(&block_group->space_info->lock);
8006
8007         memcpy(&key, &block_group->key, sizeof(key));
8008
8009         btrfs_clear_space_info_full(root->fs_info);
8010
8011         btrfs_put_block_group(block_group); /* drop our lookup ref */
8012         btrfs_put_block_group(block_group); /* drop the cache tree's ref */
8013
8014         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8015         if (ret > 0)
8016                 ret = -EIO;
8017         if (ret < 0)
8018                 goto out;
8019
8020         ret = btrfs_del_item(trans, root, path);
8021 out:
8022         btrfs_free_path(path);
8023         return ret;
8024 }
8025
8026 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8027 {
8028         struct btrfs_space_info *space_info;
8029         struct btrfs_super_block *disk_super;
8030         u64 features;
8031         u64 flags;
8032         int mixed = 0;
8033         int ret;
8034
8035         disk_super = fs_info->super_copy;
8036         if (!btrfs_super_root(disk_super))
8037                 return 1;
8038
8039         features = btrfs_super_incompat_flags(disk_super);
8040         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8041                 mixed = 1;
8042
8043         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8044         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8045         if (ret)
8046                 goto out;
8047
8048         if (mixed) {
8049                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8050                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8051         } else {
8052                 flags = BTRFS_BLOCK_GROUP_METADATA;
8053                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8054                 if (ret)
8055                         goto out;
8056
8057                 flags = BTRFS_BLOCK_GROUP_DATA;
8058                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8059         }
8060 out:
8061         return ret;
8062 }
8063
8064 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8065 {
8066         return unpin_extent_range(root, start, end);
8067 }
8068
8069 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8070                                u64 num_bytes, u64 *actual_bytes)
8071 {
8072         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8073 }
8074
8075 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8076 {
8077         struct btrfs_fs_info *fs_info = root->fs_info;
8078         struct btrfs_block_group_cache *cache = NULL;
8079         u64 group_trimmed;
8080         u64 start;
8081         u64 end;
8082         u64 trimmed = 0;
8083         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8084         int ret = 0;
8085
8086         /*
8087          * try to trim all FS space; block groups may start at a non-zero offset.
8088          */
8089         if (range->len == total_bytes)
8090                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8091         else
8092                 cache = btrfs_lookup_block_group(fs_info, range->start);
8093
8094         while (cache) {
8095                 if (cache->key.objectid >= (range->start + range->len)) {
8096                         btrfs_put_block_group(cache);
8097                         break;
8098                 }
8099
8100                 start = max(range->start, cache->key.objectid);
8101                 end = min(range->start + range->len,
8102                                 cache->key.objectid + cache->key.offset);
8103
8104                 if (end - start >= range->minlen) {
8105                         if (!block_group_cache_done(cache)) {
8106                                 ret = cache_block_group(cache, NULL, root, 0);
8107                                 if (!ret)
8108                                         wait_block_group_cache_done(cache);
8109                         }
8110                         ret = btrfs_trim_block_group(cache,
8111                                                      &group_trimmed,
8112                                                      start,
8113                                                      end,
8114                                                      range->minlen);
8115
8116                         trimmed += group_trimmed;
8117                         if (ret) {
8118                                 btrfs_put_block_group(cache);
8119                                 break;
8120                         }
8121                 }
8122
8123                 cache = next_block_group(fs_info->tree_root, cache);
8124         }
8125
8126         range->len = trimmed;
8127         return ret;
8128 }
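
/*
 * Usage sketch (user space, assuming a btrfs filesystem mounted at
 * /mnt): btrfs_trim_fs sits behind the generic FITRIM ioctl, so a
 * minimal caller looks like:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int main(void)
 *	{
 *		struct fstrim_range range = { .start = 0,
 *					      .len = (__u64)-1,
 *					      .minlen = 0 };
 *		int fd = open("/mnt", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, FITRIM, &range) < 0)
 *			return 1;
 *		printf("trimmed %llu bytes\n",
 *		       (unsigned long long)range.len);
 *		return 0;
 *	}
 *
 * on return, range.len holds the number of bytes actually trimmed,
 * exactly as this function sets it above.
 */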