Btrfs: handle running extent ops with skinny metadata
fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
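
/*
 * An illustrative sketch (hypothetical caller, not taken from this file):
 * a caller that just wants a chunk if space is tight would pass
 * CHUNK_ALLOC_LIMITED, while one that must have a new chunk passes
 * CHUNK_ALLOC_FORCE:
 *
 *      ret = do_chunk_alloc(trans, extent_root, flags, CHUNK_ALLOC_FORCE);
 *      if (ret < 0 && ret != -ENOSPC)
 *              return ret;
 */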

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
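
/*
 * A minimal usage sketch (hypothetical call sites, not from this file):
 * space reserved up front is consumed with RESERVE_ALLOC and handed back
 * on an error path with RESERVE_FREE:
 *
 *      btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *      ...
 *      btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */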

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * this is only called by cache_block_group; since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, because their free space will be released as soon as the
 * transaction commits.
 */
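/*
 * Worked example with made-up numbers: caching the byte range [0, 100)
 * while [40, 60] is still pinned adds [0, 40) inside the loop (40 bytes),
 * skips the pinned extent, and the tail [61, 100) (39 bytes) is added by
 * the final "start < end" block, so total_added is 79.
 */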
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched()) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen in the case
         * where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this
         * block group, but if we've moved to the state where we will wait on
         * caching block groups we need to first check if we're doing a fast
         * load here, so we can wait for it to finish, otherwise we could end
         * up allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}
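
/*
 * A minimal usage sketch (hypothetical caller): both lookup helpers take a
 * reference on the group they return (via btrfs_get_block_group() inside
 * the tree search), so every successful lookup must be paired with
 * btrfs_put_block_group():
 *
 *      struct btrfs_block_group_cache *cache;
 *
 *      cache = btrfs_lookup_block_group(fs_info, bytenr);
 *      if (cache) {
 *              ... use cache ...
 *              btrfs_put_block_group(cache);
 *      }
 */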

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs were processed.
 */
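/*
 * Worked example with made-up numbers: if the extent item on disk records
 * refs == 2 and the delayed ref head carries ref_mod == -1, the caller
 * sees *refs == 1, i.e. the count as it would be once the queued delayed
 * refs have run.
 */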
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (metadata) {
                key.objectid = bytenr;
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = offset;
        } else {
                key.objectid = bytenr;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = offset;
        }

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = root->leafsize;
                btrfs_release_path(path);
                goto again;
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back ref is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back ref is for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back ref. The full back ref is actually generic and can
 * be used in all cases where the implicit back ref is used. Its major
 * shortcoming is its overhead: every time a tree block gets COWed, we have
 * to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required. This information is stored in the
 * tree block info structure.
 */
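
/*
 * A minimal sketch of composing the implicit back ref key for a file
 * extent (hypothetical values; the hash helper is defined below):
 *
 *      struct btrfs_key key;
 *
 *      key.objectid = extent_bytenr;
 *      key.type = BTRFS_EXTENT_DATA_REF_KEY;
 *      key.offset = hash_extent_data_ref(root_objectid, inode_objectid,
 *                                        file_offset);
 */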

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
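
/*
 * The two CRCs above are folded into a single 64-bit key offset: the hash
 * of the root objectid lands in the high bits ((u64)high_crc << 31) and
 * the owner/offset hash in the low bits, so refs belonging to the same
 * root sort near each other in the extent tree.
 */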

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}
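
/*
 * Note on the -EEXIST loop above: key.offset is only a hash of
 * (root, objectid, offset), so two different refs can collide. Worked
 * example: if the slot at the hashed offset already holds a non-matching
 * ref, key.offset is bumped by one and the insert retried, until either a
 * matching ref is found (its count is increased) or an empty slot takes
 * the new ref item.
 */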
1237
1238 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1239                                            struct btrfs_root *root,
1240                                            struct btrfs_path *path,
1241                                            int refs_to_drop)
1242 {
1243         struct btrfs_key key;
1244         struct btrfs_extent_data_ref *ref1 = NULL;
1245         struct btrfs_shared_data_ref *ref2 = NULL;
1246         struct extent_buffer *leaf;
1247         u32 num_refs = 0;
1248         int ret = 0;
1249
1250         leaf = path->nodes[0];
1251         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1252
1253         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1254                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1255                                       struct btrfs_extent_data_ref);
1256                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1257         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1258                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1259                                       struct btrfs_shared_data_ref);
1260                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1261 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1262         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1263                 struct btrfs_extent_ref_v0 *ref0;
1264                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1265                                       struct btrfs_extent_ref_v0);
1266                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1267 #endif
1268         } else {
1269                 BUG();
1270         }
1271
1272         BUG_ON(num_refs < refs_to_drop);
1273         num_refs -= refs_to_drop;
1274
1275         if (num_refs == 0) {
1276                 ret = btrfs_del_item(trans, root, path);
1277         } else {
1278                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1279                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1280                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1281                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1282 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1283                 else {
1284                         struct btrfs_extent_ref_v0 *ref0;
1285                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1286                                         struct btrfs_extent_ref_v0);
1287                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1288                 }
1289 #endif
1290                 btrfs_mark_buffer_dirty(leaf);
1291         }
1292         return ret;
1293 }
1294
1295 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1296                                           struct btrfs_path *path,
1297                                           struct btrfs_extent_inline_ref *iref)
1298 {
1299         struct btrfs_key key;
1300         struct extent_buffer *leaf;
1301         struct btrfs_extent_data_ref *ref1;
1302         struct btrfs_shared_data_ref *ref2;
1303         u32 num_refs = 0;
1304
1305         leaf = path->nodes[0];
1306         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1307         if (iref) {
1308                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1309                     BTRFS_EXTENT_DATA_REF_KEY) {
1310                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1311                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1312                 } else {
1313                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1314                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1315                 }
1316         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1317                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1318                                       struct btrfs_extent_data_ref);
1319                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1320         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1321                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1322                                       struct btrfs_shared_data_ref);
1323                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1324 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1325         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1326                 struct btrfs_extent_ref_v0 *ref0;
1327                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1328                                       struct btrfs_extent_ref_v0);
1329                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1330 #endif
1331         } else {
1332                 WARN_ON(1);
1333         }
1334         return num_refs;
1335 }
1336
1337 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1338                                           struct btrfs_root *root,
1339                                           struct btrfs_path *path,
1340                                           u64 bytenr, u64 parent,
1341                                           u64 root_objectid)
1342 {
1343         struct btrfs_key key;
1344         int ret;
1345
1346         key.objectid = bytenr;
1347         if (parent) {
1348                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1349                 key.offset = parent;
1350         } else {
1351                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1352                 key.offset = root_objectid;
1353         }
1354
1355         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1356         if (ret > 0)
1357                 ret = -ENOENT;
1358 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1359         if (ret == -ENOENT && parent) {
1360                 btrfs_release_path(path);
1361                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1362                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1363                 if (ret > 0)
1364                         ret = -ENOENT;
1365         }
1366 #endif
1367         return ret;
1368 }
1369
1370 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1371                                           struct btrfs_root *root,
1372                                           struct btrfs_path *path,
1373                                           u64 bytenr, u64 parent,
1374                                           u64 root_objectid)
1375 {
1376         struct btrfs_key key;
1377         int ret;
1378
1379         key.objectid = bytenr;
1380         if (parent) {
1381                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1382                 key.offset = parent;
1383         } else {
1384                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1385                 key.offset = root_objectid;
1386         }
1387
1388         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1389         btrfs_release_path(path);
1390         return ret;
1391 }
1392
1393 static inline int extent_ref_type(u64 parent, u64 owner)
1394 {
1395         int type;
1396         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1397                 if (parent > 0)
1398                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1399                 else
1400                         type = BTRFS_TREE_BLOCK_REF_KEY;
1401         } else {
1402                 if (parent > 0)
1403                         type = BTRFS_SHARED_DATA_REF_KEY;
1404                 else
1405                         type = BTRFS_EXTENT_DATA_REF_KEY;
1406         }
1407         return type;
1408 }
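
/*
 * For reference, the mapping implemented above (owner below
 * BTRFS_FIRST_FREE_OBJECTID means a tree block, anything else is a data
 * extent):
 *
 *   tree block,  shared (parent set) -> BTRFS_SHARED_BLOCK_REF_KEY
 *   tree block,  not shared          -> BTRFS_TREE_BLOCK_REF_KEY
 *   data extent, shared (parent set) -> BTRFS_SHARED_DATA_REF_KEY
 *   data extent, not shared          -> BTRFS_EXTENT_DATA_REF_KEY
 */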
1409
1410 static int find_next_key(struct btrfs_path *path, int level,
1411                          struct btrfs_key *key)
1413 {
1414         for (; level < BTRFS_MAX_LEVEL; level++) {
1415                 if (!path->nodes[level])
1416                         break;
1417                 if (path->slots[level] + 1 >=
1418                     btrfs_header_nritems(path->nodes[level]))
1419                         continue;
1420                 if (level == 0)
1421                         btrfs_item_key_to_cpu(path->nodes[level], key,
1422                                               path->slots[level] + 1);
1423                 else
1424                         btrfs_node_key_to_cpu(path->nodes[level], key,
1425                                               path->slots[level] + 1);
1426                 return 0;
1427         }
1428         return 1;
1429 }
1430
1431 /*
1432  * look for inline back ref. if back ref is found, *ref_ret is set
1433  * to the address of inline back ref, and 0 is returned.
1434  *
1435  * if back ref isn't found, *ref_ret is set to the address where it
1436  * should be inserted, and -ENOENT is returned.
1437  *
1438  * if insert is true and there are too many inline back refs, the path
1439  * points to the extent item, and -EAGAIN is returned.
1440  *
1441  * NOTE: inline back refs are ordered in the same way that back ref
1442  *       items in the tree are ordered.
1443  */
1444 static noinline_for_stack
1445 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1446                                  struct btrfs_root *root,
1447                                  struct btrfs_path *path,
1448                                  struct btrfs_extent_inline_ref **ref_ret,
1449                                  u64 bytenr, u64 num_bytes,
1450                                  u64 parent, u64 root_objectid,
1451                                  u64 owner, u64 offset, int insert)
1452 {
1453         struct btrfs_key key;
1454         struct extent_buffer *leaf;
1455         struct btrfs_extent_item *ei;
1456         struct btrfs_extent_inline_ref *iref;
1457         u64 flags;
1458         u64 item_size;
1459         unsigned long ptr;
1460         unsigned long end;
1461         int extra_size;
1462         int type;
1463         int want;
1464         int ret;
1465         int err = 0;
1466         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1467                                                  SKINNY_METADATA);
1468
1469         key.objectid = bytenr;
1470         key.type = BTRFS_EXTENT_ITEM_KEY;
1471         key.offset = num_bytes;
1472
1473         want = extent_ref_type(parent, owner);
1474         if (insert) {
1475                 extra_size = btrfs_extent_inline_ref_size(want);
1476                 path->keep_locks = 1;
1477         } else
1478                 extra_size = -1;
1479
1480         /*
1481          * For skinny metadata the key offset is the level of the block,
1482          * which for tree blocks is exactly what owner holds.
1483          */
1484         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1485                 key.type = BTRFS_METADATA_ITEM_KEY;
1486                 key.offset = owner;
1487         }
1488
1489 again:
1490         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1491         if (ret < 0) {
1492                 err = ret;
1493                 goto out;
1494         }
1495
1496         /*
1497          * We may be a newly converted file system which still has the old fat
1498          * extent entries for metadata, so check whether we have one of those.
1499          */
1500         if (ret > 0 && skinny_metadata) {
1501                 skinny_metadata = false;
1502                 if (path->slots[0]) {
1503                         path->slots[0]--;
1504                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1505                                               path->slots[0]);
1506                         if (key.objectid == bytenr &&
1507                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1508                             key.offset == num_bytes)
1509                                 ret = 0;
1510                 }
1511                 if (ret) {
1512                         key.type = BTRFS_EXTENT_ITEM_KEY;
1513                         key.offset = num_bytes;
1514                         btrfs_release_path(path);
1515                         goto again;
1516                 }
1517         }
1518
1519         if (ret && !insert) {
1520                 err = -ENOENT;
1521                 goto out;
1522         } else if (ret) {
1523                 err = -EIO;
1524                 WARN_ON(1);
1525                 goto out;
1526         }
1527
1528         leaf = path->nodes[0];
1529         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1530 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1531         if (item_size < sizeof(*ei)) {
1532                 if (!insert) {
1533                         err = -ENOENT;
1534                         goto out;
1535                 }
1536                 ret = convert_extent_item_v0(trans, root, path, owner,
1537                                              extra_size);
1538                 if (ret < 0) {
1539                         err = ret;
1540                         goto out;
1541                 }
1542                 leaf = path->nodes[0];
1543                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1544         }
1545 #endif
1546         BUG_ON(item_size < sizeof(*ei));
1547
1548         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1549         flags = btrfs_extent_flags(leaf, ei);
1550
1551         ptr = (unsigned long)(ei + 1);
1552         end = (unsigned long)ei + item_size;
1553
1554         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1555                 ptr += sizeof(struct btrfs_tree_block_info);
1556                 BUG_ON(ptr > end);
1557         }
1558
1559         err = -ENOENT;
1560         while (1) {
1561                 if (ptr >= end) {
1562                         WARN_ON(ptr > end);
1563                         break;
1564                 }
1565                 iref = (struct btrfs_extent_inline_ref *)ptr;
1566                 type = btrfs_extent_inline_ref_type(leaf, iref);
1567                 if (want < type)
1568                         break;
1569                 if (want > type) {
1570                         ptr += btrfs_extent_inline_ref_size(type);
1571                         continue;
1572                 }
1573
1574                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1575                         struct btrfs_extent_data_ref *dref;
1576                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1577                         if (match_extent_data_ref(leaf, dref, root_objectid,
1578                                                   owner, offset)) {
1579                                 err = 0;
1580                                 break;
1581                         }
1582                         if (hash_extent_data_ref_item(leaf, dref) <
1583                             hash_extent_data_ref(root_objectid, owner, offset))
1584                                 break;
1585                 } else {
1586                         u64 ref_offset;
1587                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1588                         if (parent > 0) {
1589                                 if (parent == ref_offset) {
1590                                         err = 0;
1591                                         break;
1592                                 }
1593                                 if (ref_offset < parent)
1594                                         break;
1595                         } else {
1596                                 if (root_objectid == ref_offset) {
1597                                         err = 0;
1598                                         break;
1599                                 }
1600                                 if (ref_offset < root_objectid)
1601                                         break;
1602                         }
1603                 }
1604                 ptr += btrfs_extent_inline_ref_size(type);
1605         }
1606         if (err == -ENOENT && insert) {
1607                 if (item_size + extra_size >=
1608                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1609                         err = -EAGAIN;
1610                         goto out;
1611                 }
1612                 /*
1613                  * To add a new inline back ref, we have to make sure
1614                  * there is no corresponding back ref item.
1615                  * For simplicity, we just do not add a new inline back
1616                  * ref if there is any kind of item for this block.
1617                  */
1618                 if (find_next_key(path, 0, &key) == 0 &&
1619                     key.objectid == bytenr &&
1620                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1621                         err = -EAGAIN;
1622                         goto out;
1623                 }
1624         }
1625         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1626 out:
1627         if (insert) {
1628                 path->keep_locks = 0;
1629                 btrfs_unlock_up_safe(path, 1);
1630         }
1631         return err;
1632 }
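
/*
 * Illustrative sketch only (not compiled): how callers are expected to
 * react to the three return values documented above.  It mirrors
 * insert_inline_extent_backref() and __btrfs_inc_extent_ref() below; the
 * local variables are assumed to be set up as in those functions.
 */
#if 0
	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		/* found: bump the count on the existing inline ref */
		update_inline_extent_backref(root, path, iref,
					     refs_to_add, NULL);
	} else if (ret == -ENOENT) {
		/* not found: iref points at the insertion position */
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, NULL);
	} else if (ret == -EAGAIN) {
		/*
		 * no room for another inline ref; after updating the
		 * extent item, fall back to a standalone backref item
		 * (see __btrfs_inc_extent_ref() below)
		 */
		btrfs_release_path(path);
		ret = insert_extent_backref(trans, root, path, bytenr,
					    parent, root_objectid, owner,
					    offset, refs_to_add);
	}
#endif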
1633
1634 /*
1635  * helper to add a new inline back ref; extends the extent item to make room
1636  */
1637 static noinline_for_stack
1638 void setup_inline_extent_backref(struct btrfs_root *root,
1639                                  struct btrfs_path *path,
1640                                  struct btrfs_extent_inline_ref *iref,
1641                                  u64 parent, u64 root_objectid,
1642                                  u64 owner, u64 offset, int refs_to_add,
1643                                  struct btrfs_delayed_extent_op *extent_op)
1644 {
1645         struct extent_buffer *leaf;
1646         struct btrfs_extent_item *ei;
1647         unsigned long ptr;
1648         unsigned long end;
1649         unsigned long item_offset;
1650         u64 refs;
1651         int size;
1652         int type;
1653
1654         leaf = path->nodes[0];
1655         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1656         item_offset = (unsigned long)iref - (unsigned long)ei;
1657
1658         type = extent_ref_type(parent, owner);
1659         size = btrfs_extent_inline_ref_size(type);
1660
1661         btrfs_extend_item(root, path, size);
1662
1663         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1664         refs = btrfs_extent_refs(leaf, ei);
1665         refs += refs_to_add;
1666         btrfs_set_extent_refs(leaf, ei, refs);
1667         if (extent_op)
1668                 __run_delayed_extent_op(extent_op, leaf, ei);
1669
1670         ptr = (unsigned long)ei + item_offset;
1671         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1672         if (ptr < end - size)
1673                 memmove_extent_buffer(leaf, ptr + size, ptr,
1674                                       end - size - ptr);
1675
1676         iref = (struct btrfs_extent_inline_ref *)ptr;
1677         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1678         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1679                 struct btrfs_extent_data_ref *dref;
1680                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1681                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1682                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1683                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1684                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1685         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1686                 struct btrfs_shared_data_ref *sref;
1687                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1688                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1689                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1690         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1691                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1692         } else {
1693                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1694         }
1695         btrfs_mark_buffer_dirty(leaf);
1696 }
1697
1698 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1699                                  struct btrfs_root *root,
1700                                  struct btrfs_path *path,
1701                                  struct btrfs_extent_inline_ref **ref_ret,
1702                                  u64 bytenr, u64 num_bytes, u64 parent,
1703                                  u64 root_objectid, u64 owner, u64 offset)
1704 {
1705         int ret;
1706
1707         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1708                                            bytenr, num_bytes, parent,
1709                                            root_objectid, owner, offset, 0);
1710         if (ret != -ENOENT)
1711                 return ret;
1712
1713         btrfs_release_path(path);
1714         *ref_ret = NULL;
1715
1716         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1717                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1718                                             root_objectid);
1719         } else {
1720                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1721                                              root_objectid, owner, offset);
1722         }
1723         return ret;
1724 }
1725
1726 /*
1727  * helper to update an inline back ref, removing it when its count drops to zero
1728  */
1729 static noinline_for_stack
1730 void update_inline_extent_backref(struct btrfs_root *root,
1731                                   struct btrfs_path *path,
1732                                   struct btrfs_extent_inline_ref *iref,
1733                                   int refs_to_mod,
1734                                   struct btrfs_delayed_extent_op *extent_op)
1735 {
1736         struct extent_buffer *leaf;
1737         struct btrfs_extent_item *ei;
1738         struct btrfs_extent_data_ref *dref = NULL;
1739         struct btrfs_shared_data_ref *sref = NULL;
1740         unsigned long ptr;
1741         unsigned long end;
1742         u32 item_size;
1743         int size;
1744         int type;
1745         u64 refs;
1746
1747         leaf = path->nodes[0];
1748         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1749         refs = btrfs_extent_refs(leaf, ei);
1750         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1751         refs += refs_to_mod;
1752         btrfs_set_extent_refs(leaf, ei, refs);
1753         if (extent_op)
1754                 __run_delayed_extent_op(extent_op, leaf, ei);
1755
1756         type = btrfs_extent_inline_ref_type(leaf, iref);
1757
1758         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1759                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1760                 refs = btrfs_extent_data_ref_count(leaf, dref);
1761         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1762                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1763                 refs = btrfs_shared_data_ref_count(leaf, sref);
1764         } else {
1765                 refs = 1;
1766                 BUG_ON(refs_to_mod != -1);
1767         }
1768
1769         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1770         refs += refs_to_mod;
1771
1772         if (refs > 0) {
1773                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1774                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1775                 else
1776                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1777         } else {
1778                 size =  btrfs_extent_inline_ref_size(type);
1779                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1780                 ptr = (unsigned long)iref;
1781                 end = (unsigned long)ei + item_size;
1782                 if (ptr + size < end)
1783                         memmove_extent_buffer(leaf, ptr, ptr + size,
1784                                               end - ptr - size);
1785                 item_size -= size;
1786                 btrfs_truncate_item(root, path, item_size, 1);
1787         }
1788         btrfs_mark_buffer_dirty(leaf);
1789 }
1790
1791 static noinline_for_stack
1792 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1793                                  struct btrfs_root *root,
1794                                  struct btrfs_path *path,
1795                                  u64 bytenr, u64 num_bytes, u64 parent,
1796                                  u64 root_objectid, u64 owner,
1797                                  u64 offset, int refs_to_add,
1798                                  struct btrfs_delayed_extent_op *extent_op)
1799 {
1800         struct btrfs_extent_inline_ref *iref;
1801         int ret;
1802
1803         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1804                                            bytenr, num_bytes, parent,
1805                                            root_objectid, owner, offset, 1);
1806         if (ret == 0) {
1807                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1808                 update_inline_extent_backref(root, path, iref,
1809                                              refs_to_add, extent_op);
1810         } else if (ret == -ENOENT) {
1811                 setup_inline_extent_backref(root, path, iref, parent,
1812                                             root_objectid, owner, offset,
1813                                             refs_to_add, extent_op);
1814                 ret = 0;
1815         }
1816         return ret;
1817 }
1818
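/*
 * Insert a standalone (non-inline) backref item: a tree block ref for
 * metadata, or a data ref for file extents.
 */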
1819 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1820                                  struct btrfs_root *root,
1821                                  struct btrfs_path *path,
1822                                  u64 bytenr, u64 parent, u64 root_objectid,
1823                                  u64 owner, u64 offset, int refs_to_add)
1824 {
1825         int ret;
1826         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1827                 BUG_ON(refs_to_add != 1);
1828                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1829                                             parent, root_objectid);
1830         } else {
1831                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1832                                              parent, root_objectid,
1833                                              owner, offset, refs_to_add);
1834         }
1835         return ret;
1836 }
1837
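/*
 * Drop refs_to_drop references: an inline ref is updated in place (and
 * removed once it hits zero), otherwise the standalone ref item is
 * updated or deleted.
 */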
1838 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1839                                  struct btrfs_root *root,
1840                                  struct btrfs_path *path,
1841                                  struct btrfs_extent_inline_ref *iref,
1842                                  int refs_to_drop, int is_data)
1843 {
1844         int ret = 0;
1845
1846         BUG_ON(!is_data && refs_to_drop != 1);
1847         if (iref) {
1848                 update_inline_extent_backref(root, path, iref,
1849                                              -refs_to_drop, NULL);
1850         } else if (is_data) {
1851                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1852         } else {
1853                 ret = btrfs_del_item(trans, root, path);
1854         }
1855         return ret;
1856 }
1857
1858 static int btrfs_issue_discard(struct block_device *bdev,
1859                                 u64 start, u64 len)
1860 {
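        /* blkdev_issue_discard() works in 512-byte sectors, hence the shift by 9 */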
1861         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1862 }
1863
1864 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1865                                 u64 num_bytes, u64 *actual_bytes)
1866 {
1867         int ret;
1868         u64 discarded_bytes = 0;
1869         struct btrfs_bio *bbio = NULL;
1870
1872         /* Tell the block device(s) that the sectors can be discarded */
1873         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1874                               bytenr, &num_bytes, &bbio, 0);
1875         /* Error condition is -ENOMEM */
1876         if (!ret) {
1877                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1878                 int i;
1879
1881                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1882                         if (!stripe->dev->can_discard)
1883                                 continue;
1884
1885                         ret = btrfs_issue_discard(stripe->dev->bdev,
1886                                                   stripe->physical,
1887                                                   stripe->length);
1888                         if (!ret)
1889                                 discarded_bytes += stripe->length;
1890                         else if (ret != -EOPNOTSUPP)
1891                                 break; /* Logic errors, -ENOMEM or -EIO */
1892
1893                         /*
1894                          * If we get back -EOPNOTSUPP for some reason,
1895                          * ignore the return value so we don't break
1896                          * callers of discard_extent.
1897                          */
1898                         ret = 0;
1899                 }
1900                 kfree(bbio);
1901         }
1902
1903         if (actual_bytes)
1904                 *actual_bytes = discarded_bytes;
1905
1907         if (ret == -EOPNOTSUPP)
1908                 ret = 0;
1909         return ret;
1910 }
1911
1912 /* Queue a delayed ref that adds a reference to an extent.  Can return -ENOMEM */
1913 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1914                          struct btrfs_root *root,
1915                          u64 bytenr, u64 num_bytes, u64 parent,
1916                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1917 {
1918         int ret;
1919         struct btrfs_fs_info *fs_info = root->fs_info;
1920
1921         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1922                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1923
1924         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1925                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1926                                         num_bytes,
1927                                         parent, root_objectid, (int)owner,
1928                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1929         } else {
1930                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1931                                         num_bytes,
1932                                         parent, root_objectid, owner, offset,
1933                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1934         }
1935         return ret;
1936 }
1937
1938 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1939                                   struct btrfs_root *root,
1940                                   u64 bytenr, u64 num_bytes,
1941                                   u64 parent, u64 root_objectid,
1942                                   u64 owner, u64 offset, int refs_to_add,
1943                                   struct btrfs_delayed_extent_op *extent_op)
1944 {
1945         struct btrfs_path *path;
1946         struct extent_buffer *leaf;
1947         struct btrfs_extent_item *item;
1948         u64 refs;
1949         int ret;
1950         int err = 0;
1951
1952         path = btrfs_alloc_path();
1953         if (!path)
1954                 return -ENOMEM;
1955
1956         path->reada = 1;
1957         path->leave_spinning = 1;
1958         /* this will set up the path even if it fails to insert the back ref */
1959         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1960                                            path, bytenr, num_bytes, parent,
1961                                            root_objectid, owner, offset,
1962                                            refs_to_add, extent_op);
1963         if (ret == 0)
1964                 goto out;
1965
1966         if (ret != -EAGAIN) {
1967                 err = ret;
1968                 goto out;
1969         }
1970
1971         leaf = path->nodes[0];
1972         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1973         refs = btrfs_extent_refs(leaf, item);
1974         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1975         if (extent_op)
1976                 __run_delayed_extent_op(extent_op, leaf, item);
1977
1978         btrfs_mark_buffer_dirty(leaf);
1979         btrfs_release_path(path);
1980
1981         path->reada = 1;
1982         path->leave_spinning = 1;
1983
1984         /* now insert the actual backref */
1985         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1986                                     path, bytenr, parent, root_objectid,
1987                                     owner, offset, refs_to_add);
1988         if (ret)
1989                 btrfs_abort_transaction(trans, root, ret);
1990 out:
1991         btrfs_free_path(path);
1992         return err;
1993 }
1994
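/*
 * Apply one delayed data ref.  Depending on the action recorded in the
 * node this allocates the reserved file extent, adds references to an
 * existing extent, or drops references from it.
 */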
1995 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1996                                 struct btrfs_root *root,
1997                                 struct btrfs_delayed_ref_node *node,
1998                                 struct btrfs_delayed_extent_op *extent_op,
1999                                 int insert_reserved)
2000 {
2001         int ret = 0;
2002         struct btrfs_delayed_data_ref *ref;
2003         struct btrfs_key ins;
2004         u64 parent = 0;
2005         u64 ref_root = 0;
2006         u64 flags = 0;
2007
2008         ins.objectid = node->bytenr;
2009         ins.offset = node->num_bytes;
2010         ins.type = BTRFS_EXTENT_ITEM_KEY;
2011
2012         ref = btrfs_delayed_node_to_data_ref(node);
2013         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2014                 parent = ref->parent;
2015         else
2016                 ref_root = ref->root;
2017
2018         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2019                 if (extent_op)
2020                         flags |= extent_op->flags_to_set;
2021                 ret = alloc_reserved_file_extent(trans, root,
2022                                                  parent, ref_root, flags,
2023                                                  ref->objectid, ref->offset,
2024                                                  &ins, node->ref_mod);
2025         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2026                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2027                                              node->num_bytes, parent,
2028                                              ref_root, ref->objectid,
2029                                              ref->offset, node->ref_mod,
2030                                              extent_op);
2031         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2032                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2033                                           node->num_bytes, parent,
2034                                           ref_root, ref->objectid,
2035                                           ref->offset, node->ref_mod,
2036                                           extent_op);
2037         } else {
2038                 BUG();
2039         }
2040         return ret;
2041 }
2042
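/*
 * Apply the updates recorded in a delayed extent op to the extent item
 * in place: set the extra flags and, if requested, rewrite the first key
 * stored in the btrfs_tree_block_info that follows the item.
 */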
2043 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2044                                     struct extent_buffer *leaf,
2045                                     struct btrfs_extent_item *ei)
2046 {
2047         u64 flags = btrfs_extent_flags(leaf, ei);
2048         if (extent_op->update_flags) {
2049                 flags |= extent_op->flags_to_set;
2050                 btrfs_set_extent_flags(leaf, ei, flags);
2051         }
2052
2053         if (extent_op->update_key) {
2054                 struct btrfs_tree_block_info *bi;
2055                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2056                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2057                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2058         }
2059 }
2060
2061 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2062                                  struct btrfs_root *root,
2063                                  struct btrfs_delayed_ref_node *node,
2064                                  struct btrfs_delayed_extent_op *extent_op)
2065 {
2066         struct btrfs_key key;
2067         struct btrfs_path *path;
2068         struct btrfs_extent_item *ei;
2069         struct extent_buffer *leaf;
2070         u32 item_size;
2071         int ret;
2072         int err = 0;
2073         int metadata = !extent_op->is_data;
2074
2075         if (trans->aborted)
2076                 return 0;
2077
2078         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2079                 metadata = 0;
2080
2081         path = btrfs_alloc_path();
2082         if (!path)
2083                 return -ENOMEM;
2084
2085         key.objectid = node->bytenr;
2086
2087         if (metadata) {
2088                 key.type = BTRFS_METADATA_ITEM_KEY;
2089                 key.offset = extent_op->level;
2090         } else {
2091                 key.type = BTRFS_EXTENT_ITEM_KEY;
2092                 key.offset = node->num_bytes;
2093         }
2094
2095 again:
2096         path->reada = 1;
2097         path->leave_spinning = 1;
2098         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2099                                 path, 0, 1);
2100         if (ret < 0) {
2101                 err = ret;
2102                 goto out;
2103         }
2104         if (ret > 0) {
2105                 if (metadata) {
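                        /*
                         * The skinny metadata item was not found; retry
                         * with the old style extent item key, which uses
                         * the byte count as the offset.
                         */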
2106                         btrfs_release_path(path);
2107                         metadata = 0;
2108
2109                         key.offset = node->num_bytes;
2110                         key.type = BTRFS_EXTENT_ITEM_KEY;
2111                         goto again;
2112                 }
2113                 err = -EIO;
2114                 goto out;
2115         }
2116
2117         leaf = path->nodes[0];
2118         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2119 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2120         if (item_size < sizeof(*ei)) {
2121                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2122                                              path, (u64)-1, 0);
2123                 if (ret < 0) {
2124                         err = ret;
2125                         goto out;
2126                 }
2127                 leaf = path->nodes[0];
2128                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2129         }
2130 #endif
2131         BUG_ON(item_size < sizeof(*ei));
2132         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2133         __run_delayed_extent_op(extent_op, leaf, ei);
2134
2135         btrfs_mark_buffer_dirty(leaf);
2136 out:
2137         btrfs_free_path(path);
2138         return err;
2139 }
2140
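/*
 * Apply one delayed tree block ref, the metadata counterpart of
 * run_delayed_data_ref().  With the skinny-metadata incompat feature the
 * insertion key carries the block level in its offset instead of the
 * byte count.
 */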
2141 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2142                                 struct btrfs_root *root,
2143                                 struct btrfs_delayed_ref_node *node,
2144                                 struct btrfs_delayed_extent_op *extent_op,
2145                                 int insert_reserved)
2146 {
2147         int ret = 0;
2148         struct btrfs_delayed_tree_ref *ref;
2149         struct btrfs_key ins;
2150         u64 parent = 0;
2151         u64 ref_root = 0;
2152         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2153                                                  SKINNY_METADATA);
2154
2155         ref = btrfs_delayed_node_to_tree_ref(node);
2156         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2157                 parent = ref->parent;
2158         else
2159                 ref_root = ref->root;
2160
2161         ins.objectid = node->bytenr;
2162         if (skinny_metadata) {
2163                 ins.offset = ref->level;
2164                 ins.type = BTRFS_METADATA_ITEM_KEY;
2165         } else {
2166                 ins.offset = node->num_bytes;
2167                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2168         }
2169
2170         BUG_ON(node->ref_mod != 1);
2171         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2172                 BUG_ON(!extent_op || !extent_op->update_flags);
2173                 ret = alloc_reserved_tree_block(trans, root,
2174                                                 parent, ref_root,
2175                                                 extent_op->flags_to_set,
2176                                                 &extent_op->key,
2177                                                 ref->level, &ins);
2178         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2179                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2180                                              node->num_bytes, parent, ref_root,
2181                                              ref->level, 0, 1, extent_op);
2182         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2183                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2184                                           node->num_bytes, parent, ref_root,
2185                                           ref->level, 0, 1, extent_op);
2186         } else {
2187                 BUG();
2188         }
2189         return ret;
2190 }
2191
2192 /* helper function to actually process a single delayed ref entry */
2193 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2194                                struct btrfs_root *root,
2195                                struct btrfs_delayed_ref_node *node,
2196                                struct btrfs_delayed_extent_op *extent_op,
2197                                int insert_reserved)
2198 {
2199         int ret = 0;
2200
2201         if (trans->aborted)
2202                 return 0;
2203
2204         if (btrfs_delayed_ref_is_head(node)) {
2205                 struct btrfs_delayed_ref_head *head;
2206                 /*
2207                  * we've hit the end of the chain and we were supposed
2208                  * to insert this extent into the tree.  But it got
2209                  * deleted before we ever needed to insert it, so all
2210                  * we have to do is clean up the accounting
2211                  */
2212                 BUG_ON(extent_op);
2213                 head = btrfs_delayed_node_to_head(node);
2214                 if (insert_reserved) {
2215                         btrfs_pin_extent(root, node->bytenr,
2216                                          node->num_bytes, 1);
2217                         if (head->is_data) {
2218                                 ret = btrfs_del_csums(trans, root,
2219                                                       node->bytenr,
2220                                                       node->num_bytes);
2221                         }
2222                 }
2223                 return ret;
2224         }
2225
2226         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2227             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2228                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2229                                            insert_reserved);
2230         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2231                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2232                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2233                                            insert_reserved);
2234         else
2235                 BUG();
2236         return ret;
2237 }
2238
2239 static noinline struct btrfs_delayed_ref_node *
2240 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2241 {
2242         struct rb_node *node;
2243         struct btrfs_delayed_ref_node *ref;
2244         int action = BTRFS_ADD_DELAYED_REF;
2245 again:
2246         /*
2247          * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2248          * this prevents the ref count from going down to zero while
2249          * there are still pending delayed refs.
2250          */
2251         node = rb_prev(&head->node.rb_node);
2252         while (1) {
2253                 if (!node)
2254                         break;
2255                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2256                                 rb_node);
2257                 if (ref->bytenr != head->node.bytenr)
2258                         break;
2259                 if (ref->action == action)
2260                         return ref;
2261                 node = rb_prev(node);
2262         }
2263         if (action == BTRFS_ADD_DELAYED_REF) {
2264                 action = BTRFS_DROP_DELAYED_REF;
2265                 goto again;
2266         }
2267         return NULL;
2268 }
2269
2270 /*
2271  * Returns the number of refs processed on success (including when the
2272  * transaction is already aborted), or a negative errno on failure.
2273  */
2274 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2275                                        struct btrfs_root *root,
2276                                        struct list_head *cluster)
2277 {
2278         struct btrfs_delayed_ref_root *delayed_refs;
2279         struct btrfs_delayed_ref_node *ref;
2280         struct btrfs_delayed_ref_head *locked_ref = NULL;
2281         struct btrfs_delayed_extent_op *extent_op;
2282         struct btrfs_fs_info *fs_info = root->fs_info;
2283         int ret;
2284         int count = 0;
2285         int must_insert_reserved = 0;
2286
2287         delayed_refs = &trans->transaction->delayed_refs;
2288         while (1) {
2289                 if (!locked_ref) {
2290                         /* pick a new head ref from the cluster list */
2291                         if (list_empty(cluster))
2292                                 break;
2293
2294                         locked_ref = list_entry(cluster->next,
2295                                      struct btrfs_delayed_ref_head, cluster);
2296
2297                         /* grab the lock that says we are going to process
2298                          * all the refs for this head */
2299                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2300
2301                         /*
2302                          * we may have dropped the spin lock to get the head
2303                          * mutex lock, and that might have given someone else
2304                          * time to free the head.  If that's true, it has been
2305                          * removed from our list and we can move on.
2306                          */
2307                         if (ret == -EAGAIN) {
2308                                 locked_ref = NULL;
2309                                 count++;
2310                                 continue;
2311                         }
2312                 }
2313
2314                 /*
2315                  * We need to try and merge add/drops of the same ref since we
2316                  * can run into issues with relocate dropping the implicit ref
2317                  * and then it being added back again before the drop can
2318                  * finish.  If we merged anything we need to re-loop so we can
2319                  * get a good ref.
2320                  */
2321                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2322                                          locked_ref);
2323
2324                 /*
2325                  * locked_ref is the head node, so we have to go one
2326                  * node back for any delayed ref updates
2327                  */
2328                 ref = select_delayed_ref(locked_ref);
2329
2330                 if (ref && ref->seq &&
2331                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2332                         /*
2333                          * there are still refs with lower seq numbers in the
2334                          * process of being added. Don't run this ref yet.
2335                          */
2336                         list_del_init(&locked_ref->cluster);
2337                         btrfs_delayed_ref_unlock(locked_ref);
2338                         locked_ref = NULL;
2339                         delayed_refs->num_heads_ready++;
2340                         spin_unlock(&delayed_refs->lock);
2341                         cond_resched();
2342                         spin_lock(&delayed_refs->lock);
2343                         continue;
2344                 }
2345
2346                 /*
2347                  * record the must insert reserved flag before we
2348                  * drop the spin lock.
2349                  */
2350                 must_insert_reserved = locked_ref->must_insert_reserved;
2351                 locked_ref->must_insert_reserved = 0;
2352
2353                 extent_op = locked_ref->extent_op;
2354                 locked_ref->extent_op = NULL;
2355
2356                 if (!ref) {
2357                         /* All delayed refs have been processed.  Go ahead
2358                          * and send the head node to run_one_delayed_ref,
2359                          * so that any accounting fixes can happen.
2360                          */
2361                         ref = &locked_ref->node;
2362
2363                         if (extent_op && must_insert_reserved) {
2364                                 btrfs_free_delayed_extent_op(extent_op);
2365                                 extent_op = NULL;
2366                         }
2367
2368                         if (extent_op) {
2369                                 spin_unlock(&delayed_refs->lock);
2370
2371                                 ret = run_delayed_extent_op(trans, root,
2372                                                             ref, extent_op);
2373                                 btrfs_free_delayed_extent_op(extent_op);
2374
2375                                 if (ret) {
2376                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2377                                         spin_lock(&delayed_refs->lock);
2378                                         btrfs_delayed_ref_unlock(locked_ref);
2379                                         return ret;
2380                                 }
2381
2382                                 goto next;
2383                         }
2384                 }
2385
2386                 ref->in_tree = 0;
2387                 rb_erase(&ref->rb_node, &delayed_refs->root);
2388                 delayed_refs->num_entries--;
2389                 if (!btrfs_delayed_ref_is_head(ref)) {
2390                         /*
2391                          * when we play the delayed ref, also correct the
2392                          * ref_mod on head
2393                          */
2394                         switch (ref->action) {
2395                         case BTRFS_ADD_DELAYED_REF:
2396                         case BTRFS_ADD_DELAYED_EXTENT:
2397                                 locked_ref->node.ref_mod -= ref->ref_mod;
2398                                 break;
2399                         case BTRFS_DROP_DELAYED_REF:
2400                                 locked_ref->node.ref_mod += ref->ref_mod;
2401                                 break;
2402                         default:
2403                                 WARN_ON(1);
2404                         }
2405                 }
2406                 spin_unlock(&delayed_refs->lock);
2407
2408                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2409                                           must_insert_reserved);
2410
2411                 btrfs_free_delayed_extent_op(extent_op);
2412                 if (ret) {
2413                         btrfs_delayed_ref_unlock(locked_ref);
2414                         btrfs_put_delayed_ref(ref);
2415                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2416                         spin_lock(&delayed_refs->lock);
2417                         return ret;
2418                 }
2419
2420                 /*
2421                  * If this node is a head, that means all the refs in this head
2422                  * have been dealt with, and we will pick the next head to deal
2423                  * with, so we must unlock the head and drop it from the cluster
2424                  * list before we release it.
2425                  */
2426                 if (btrfs_delayed_ref_is_head(ref)) {
2427                         list_del_init(&locked_ref->cluster);
2428                         btrfs_delayed_ref_unlock(locked_ref);
2429                         locked_ref = NULL;
2430                 }
2431                 btrfs_put_delayed_ref(ref);
2432                 count++;
2433 next:
2434                 cond_resched();
2435                 spin_lock(&delayed_refs->lock);
2436         }
2437         return count;
2438 }
2439
2440 #ifdef SCRAMBLE_DELAYED_REFS
2441 /*
2442  * Normally delayed refs get processed in ascending bytenr order. This
2443  * correlates in most cases to the order added. To expose dependencies on this
2444  * order, we start to process the tree in the middle instead of the beginning.
2445  */
2446 static u64 find_middle(struct rb_root *root)
2447 {
2448         struct rb_node *n = root->rb_node;
2449         struct btrfs_delayed_ref_node *entry;
2450         int alt = 1;
2451         u64 middle;
2452         u64 first = 0, last = 0;
2453
2454         n = rb_first(root);
2455         if (n) {
2456                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2457                 first = entry->bytenr;
2458         }
2459         n = rb_last(root);
2460         if (n) {
2461                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2462                 last = entry->bytenr;
2463         }
2464         n = root->rb_node;
2465
2466         while (n) {
2467                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2468                 WARN_ON(!entry->in_tree);
2469
2470                 middle = entry->bytenr;
2471
2472                 if (alt)
2473                         n = n->rb_left;
2474                 else
2475                         n = n->rb_right;
2476
2477                 alt = 1 - alt;
2478         }
2479         return middle;
2480 }
2481 #endif
2482
2483 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2484                                          struct btrfs_fs_info *fs_info)
2485 {
2486         struct qgroup_update *qgroup_update;
2487         int ret = 0;
2488
2489         if (list_empty(&trans->qgroup_ref_list) !=
2490             !trans->delayed_ref_elem.seq) {
2491                 /* list without seq or seq without list */
2492                 btrfs_err(fs_info,
2493                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2494                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2495                         (u32)(trans->delayed_ref_elem.seq >> 32),
2496                         (u32)trans->delayed_ref_elem.seq);
2497                 BUG();
2498         }
2499
2500         if (!trans->delayed_ref_elem.seq)
2501                 return 0;
2502
2503         while (!list_empty(&trans->qgroup_ref_list)) {
2504                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2505                                                  struct qgroup_update, list);
2506                 list_del(&qgroup_update->list);
2507                 if (!ret)
2508                         ret = btrfs_qgroup_account_ref(
2509                                         trans, fs_info, qgroup_update->node,
2510                                         qgroup_update->extent_op);
2511                 kfree(qgroup_update);
2512         }
2513
2514         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2515
2516         return ret;
2517 }
2518
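/*
 * Return 1 if delayed_refs->ref_seq has moved outside the window
 * [seq, seq + count), i.e. enough refs have been run since seq was
 * sampled (val < seq covers the counter wrapping).
 */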
2519 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2520                       int count)
2521 {
2522         int val = atomic_read(&delayed_refs->ref_seq);
2523
2524         if (val < seq || val >= seq + count)
2525                 return 1;
2526         return 0;
2527 }
2528
2529 /*
2530  * this starts processing the delayed reference count updates and
2531  * extent insertions we have queued up so far.  count can be
2532  * 0, which means to process everything in the tree at the start
2533  * of the run (but not newly added entries), or it can be some target
2534  * number you'd like to process.
2535  *
2536  * Returns 0 on success or if called with an aborted transaction
2537  * Returns <0 on error and aborts the transaction
2538  */
2539 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2540                            struct btrfs_root *root, unsigned long count)
2541 {
2542         struct rb_node *node;
2543         struct btrfs_delayed_ref_root *delayed_refs;
2544         struct btrfs_delayed_ref_node *ref;
2545         struct list_head cluster;
2546         int ret;
2547         u64 delayed_start;
2548         int run_all = count == (unsigned long)-1;
2549         int run_most = 0;
2550         int loops;
2551
2552         /* We'll clean this up in btrfs_cleanup_transaction */
2553         if (trans->aborted)
2554                 return 0;
2555
2556         if (root == root->fs_info->extent_root)
2557                 root = root->fs_info->tree_root;
2558
2559         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2560
2561         delayed_refs = &trans->transaction->delayed_refs;
2562         INIT_LIST_HEAD(&cluster);
2563         if (count == 0) {
2564                 count = delayed_refs->num_entries * 2;
2565                 run_most = 1;
2566         }
2567
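        /*
         * For small untargeted runs, let only one process run refs at a
         * time: if procs_running_refs is already claimed, either bail out
         * (when few entries are queued) or sleep until the other process
         * has run a batch of refs for us.
         */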
2568         if (!run_all && !run_most) {
2569                 int old;
2570                 int seq = atomic_read(&delayed_refs->ref_seq);
2571
2572 progress:
2573                 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2574                 if (old) {
2575                         DEFINE_WAIT(__wait);
2576                         if (delayed_refs->num_entries < 16348)
2577                                 return 0;
2578
2579                         prepare_to_wait(&delayed_refs->wait, &__wait,
2580                                         TASK_UNINTERRUPTIBLE);
2581
2582                         old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2583                         if (old) {
2584                                 schedule();
2585                                 finish_wait(&delayed_refs->wait, &__wait);
2586
2587                                 if (!refs_newer(delayed_refs, seq, 256))
2588                                         goto progress;
2589                                 else
2590                                         return 0;
2591                         } else {
2592                                 finish_wait(&delayed_refs->wait, &__wait);
2593                                 goto again;
2594                         }
2595                 }
2596
2597         } else {
2598                 atomic_inc(&delayed_refs->procs_running_refs);
2599         }
2600
2601 again:
2602         loops = 0;
2603         spin_lock(&delayed_refs->lock);
2604
2605 #ifdef SCRAMBLE_DELAYED_REFS
2606         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2607 #endif
2608
2609         while (1) {
2610                 if (!(run_all || run_most) &&
2611                     delayed_refs->num_heads_ready < 64)
2612                         break;
2613
2614                 /*
2615                  * go find something we can process in the rbtree.  We start at
2616                  * the beginning of the tree, and then build a cluster
2617                  * of refs to process starting at the first one we are able to
2618                  * lock
2619                  * lock.
2620                 delayed_start = delayed_refs->run_delayed_start;
2621                 ret = btrfs_find_ref_cluster(trans, &cluster,
2622                                              delayed_refs->run_delayed_start);
2623                 if (ret)
2624                         break;
2625
2626                 ret = run_clustered_refs(trans, root, &cluster);
2627                 if (ret < 0) {
2628                         btrfs_release_ref_cluster(&cluster);
2629                         spin_unlock(&delayed_refs->lock);
2630                         btrfs_abort_transaction(trans, root, ret);
2631                         atomic_dec(&delayed_refs->procs_running_refs);
2632                         return ret;
2633                 }
2634
2635                 atomic_add(ret, &delayed_refs->ref_seq);
2636
2637                 count -= min_t(unsigned long, ret, count);
2638
2639                 if (count == 0)
2640                         break;
2641
2642                 if (delayed_start >= delayed_refs->run_delayed_start) {
2643                         if (loops == 0) {
2644                                 /*
2645                                  * btrfs_find_ref_cluster looped. let's do one
2646                                  * more cycle. if we don't run any delayed refs
2647                                  * during that cycle (because they are all
2648                                  * blocked), bail out.
2649                                  */
2650                                 loops = 1;
2651                         } else {
2652                                 /*
2653                                  * no runnable refs left, stop trying
2654                                  */
2655                                 BUG_ON(run_all);
2656                                 break;
2657                         }
2658                 }
2659                 if (ret) {
2660                         /* refs were run, let's reset staleness detection */
2661                         loops = 0;
2662                 }
2663         }
2664
2665         if (run_all) {
2666                 if (!list_empty(&trans->new_bgs)) {
2667                         spin_unlock(&delayed_refs->lock);
2668                         btrfs_create_pending_block_groups(trans, root);
2669                         spin_lock(&delayed_refs->lock);
2670                 }
2671
2672                 node = rb_first(&delayed_refs->root);
2673                 if (!node)
2674                         goto out;
2675                 count = (unsigned long)-1;
2676
2677                 while (node) {
2678                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2679                                        rb_node);
2680                         if (btrfs_delayed_ref_is_head(ref)) {
2681                                 struct btrfs_delayed_ref_head *head;
2682
2683                                 head = btrfs_delayed_node_to_head(ref);
2684                                 atomic_inc(&ref->refs);
2685
2686                                 spin_unlock(&delayed_refs->lock);
2687                                 /*
2688                                  * Mutex was contended, block until it's
2689                                  * released and try again
2690                                  */
2691                                 mutex_lock(&head->mutex);
2692                                 mutex_unlock(&head->mutex);
2693
2694                                 btrfs_put_delayed_ref(ref);
2695                                 cond_resched();
2696                                 goto again;
2697                         }
2698                         node = rb_next(node);
2699                 }
2700                 spin_unlock(&delayed_refs->lock);
2701                 schedule_timeout(1);
2702                 goto again;
2703         }
2704 out:
2705         atomic_dec(&delayed_refs->procs_running_refs);
2706         smp_mb();
2707         if (waitqueue_active(&delayed_refs->wait))
2708                 wake_up(&delayed_refs->wait);
2709
2710         spin_unlock(&delayed_refs->lock);
2711         assert_qgroups_uptodate(trans);
2712         return 0;
2713 }
2714
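/*
 * btrfs_set_disk_extent_flags - queue a delayed extent op that ORs @flags
 * into the extent item flags for the extent at [@bytenr, @bytenr +
 * @num_bytes).  The update is applied when the delayed refs are run; if
 * queueing fails, the delayed_extent_op is freed and the error returned.
 */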
2715 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2716                                 struct btrfs_root *root,
2717                                 u64 bytenr, u64 num_bytes, u64 flags,
2718                                 int level, int is_data)
2719 {
2720         struct btrfs_delayed_extent_op *extent_op;
2721         int ret;
2722
2723         extent_op = btrfs_alloc_delayed_extent_op();
2724         if (!extent_op)
2725                 return -ENOMEM;
2726
2727         extent_op->flags_to_set = flags;
2728         extent_op->update_flags = 1;
2729         extent_op->update_key = 0;
2730         extent_op->is_data = is_data ? 1 : 0;
2731         extent_op->level = level;
2732
2733         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2734                                           num_bytes, extent_op);
2735         if (ret)
2736                 btrfs_free_delayed_extent_op(extent_op);
2737         return ret;
2738 }
2739
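/*
 * check_delayed_ref - see whether the still-pending delayed refs for
 * @bytenr prove that the extent is shared.  Returns 0 if the only pending
 * ref is a data ref from this root at (@objectid, @offset), 1 if any
 * other ref exists (or we can't tell), -ENOENT if there is no delayed
 * ref head for @bytenr, and -EAGAIN if the head was contended and the
 * caller should retry.
 */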
2740 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2741                                       struct btrfs_root *root,
2742                                       struct btrfs_path *path,
2743                                       u64 objectid, u64 offset, u64 bytenr)
2744 {
2745         struct btrfs_delayed_ref_head *head;
2746         struct btrfs_delayed_ref_node *ref;
2747         struct btrfs_delayed_data_ref *data_ref;
2748         struct btrfs_delayed_ref_root *delayed_refs;
2749         struct rb_node *node;
2750         int ret = 0;
2751
2752         ret = -ENOENT;
2753         delayed_refs = &trans->transaction->delayed_refs;
2754         spin_lock(&delayed_refs->lock);
2755         head = btrfs_find_delayed_ref_head(trans, bytenr);
2756         if (!head)
2757                 goto out;
2758
2759         if (!mutex_trylock(&head->mutex)) {
2760                 atomic_inc(&head->node.refs);
2761                 spin_unlock(&delayed_refs->lock);
2762
2763                 btrfs_release_path(path);
2764
2765                 /*
2766                  * Mutex was contended, block until it's released and let
2767                  * caller try again
2768                  */
2769                 mutex_lock(&head->mutex);
2770                 mutex_unlock(&head->mutex);
2771                 btrfs_put_delayed_ref(&head->node);
2772                 return -EAGAIN;
2773         }
2774
2775         node = rb_prev(&head->node.rb_node);
2776         if (!node)
2777                 goto out_unlock;
2778
2779         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2780
2781         if (ref->bytenr != bytenr)
2782                 goto out_unlock;
2783
2784         ret = 1;
2785         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2786                 goto out_unlock;
2787
2788         data_ref = btrfs_delayed_node_to_data_ref(ref);
2789
2790         node = rb_prev(node);
2791         if (node) {
2792                 int seq = ref->seq;
2793
2794                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2795                 if (ref->bytenr == bytenr && ref->seq == seq)
2796                         goto out_unlock;
2797         }
2798
2799         if (data_ref->root != root->root_key.objectid ||
2800             data_ref->objectid != objectid || data_ref->offset != offset)
2801                 goto out_unlock;
2802
2803         ret = 0;
2804 out_unlock:
2805         mutex_unlock(&head->mutex);
2806 out:
2807         spin_unlock(&delayed_refs->lock);
2808         return ret;
2809 }
2810
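/*
 * check_committed_ref - the on-disk counterpart of check_delayed_ref.
 * Look up the extent item for @bytenr in the extent tree and return 0
 * only if it carries exactly one inline data ref, that ref belongs to
 * this root at (@objectid, @offset), and the extent is newer than the
 * root's last snapshot.  Otherwise returns 1 (possibly shared) or
 * -ENOENT if no matching extent item was found.
 */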
2811 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2812                                         struct btrfs_root *root,
2813                                         struct btrfs_path *path,
2814                                         u64 objectid, u64 offset, u64 bytenr)
2815 {
2816         struct btrfs_root *extent_root = root->fs_info->extent_root;
2817         struct extent_buffer *leaf;
2818         struct btrfs_extent_data_ref *ref;
2819         struct btrfs_extent_inline_ref *iref;
2820         struct btrfs_extent_item *ei;
2821         struct btrfs_key key;
2822         u32 item_size;
2823         int ret;
2824
2825         key.objectid = bytenr;
2826         key.offset = (u64)-1;
2827         key.type = BTRFS_EXTENT_ITEM_KEY;
2828
2829         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2830         if (ret < 0)
2831                 goto out;
2832         BUG_ON(ret == 0); /* Corruption */
2833
2834         ret = -ENOENT;
2835         if (path->slots[0] == 0)
2836                 goto out;
2837
2838         path->slots[0]--;
2839         leaf = path->nodes[0];
2840         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2841
2842         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2843                 goto out;
2844
2845         ret = 1;
2846         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2847 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2848         if (item_size < sizeof(*ei)) {
2849                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2850                 goto out;
2851         }
2852 #endif
2853         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2854
2855         if (item_size != sizeof(*ei) +
2856             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2857                 goto out;
2858
2859         if (btrfs_extent_generation(leaf, ei) <=
2860             btrfs_root_last_snapshot(&root->root_item))
2861                 goto out;
2862
2863         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2864         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2865             BTRFS_EXTENT_DATA_REF_KEY)
2866                 goto out;
2867
2868         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2869         if (btrfs_extent_refs(leaf, ei) !=
2870             btrfs_extent_data_ref_count(leaf, ref) ||
2871             btrfs_extent_data_ref_root(leaf, ref) !=
2872             root->root_key.objectid ||
2873             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2874             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2875                 goto out;
2876
2877         ret = 0;
2878 out:
2879         return ret;
2880 }
2881
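/*
 * btrfs_cross_ref_exist - check whether anyone other than (@root,
 * @objectid, @offset) references the extent at @bytenr, consulting both
 * the committed extent tree and the pending delayed refs.  Returns 0 if
 * the extent is known to be exclusively ours and nonzero otherwise.
 *
 * Illustrative sketch of a caller (variable names illustrative; this is
 * roughly what the nocow path does when deciding whether an extent may
 * be overwritten in place):
 *
 *	if (btrfs_cross_ref_exist(trans, root, ino, offset, disk_bytenr))
 *		goto fall_back_to_cow;
 */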
2882 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2883                           struct btrfs_root *root,
2884                           u64 objectid, u64 offset, u64 bytenr)
2885 {
2886         struct btrfs_path *path;
2887         int ret;
2888         int ret2;
2889
2890         path = btrfs_alloc_path();
2891         if (!path)
2892                 return -ENOENT;
2893
2894         do {
2895                 ret = check_committed_ref(trans, root, path, objectid,
2896                                           offset, bytenr);
2897                 if (ret && ret != -ENOENT)
2898                         goto out;
2899
2900                 ret2 = check_delayed_ref(trans, root, path, objectid,
2901                                          offset, bytenr);
2902         } while (ret2 == -EAGAIN);
2903
2904         if (ret2 && ret2 != -ENOENT) {
2905                 ret = ret2;
2906                 goto out;
2907         }
2908
2909         if (ret != -ENOENT || ret2 != -ENOENT)
2910                 ret = 0;
2911 out:
2912         btrfs_free_path(path);
2913         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2914                 WARN_ON(ret > 0);
2915         return ret;
2916 }
2917
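/*
 * __btrfs_mod_ref - add or drop one reference for every extent that the
 * tree block @buf points to.  For a leaf this walks the file extent items
 * (skipping inline extents and holes); for a node it walks the child
 * block pointers.  @inc selects btrfs_inc_extent_ref vs btrfs_free_extent,
 * and @full_backref records @buf itself as the parent of the refs.
 */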
2918 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2919                            struct btrfs_root *root,
2920                            struct extent_buffer *buf,
2921                            int full_backref, int inc, int for_cow)
2922 {
2923         u64 bytenr;
2924         u64 num_bytes;
2925         u64 parent;
2926         u64 ref_root;
2927         u32 nritems;
2928         struct btrfs_key key;
2929         struct btrfs_file_extent_item *fi;
2930         int i;
2931         int level;
2932         int ret = 0;
2933         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2934                             u64, u64, u64, u64, u64, u64, int);
2935
2936         ref_root = btrfs_header_owner(buf);
2937         nritems = btrfs_header_nritems(buf);
2938         level = btrfs_header_level(buf);
2939
2940         if (!root->ref_cows && level == 0)
2941                 return 0;
2942
2943         if (inc)
2944                 process_func = btrfs_inc_extent_ref;
2945         else
2946                 process_func = btrfs_free_extent;
2947
2948         if (full_backref)
2949                 parent = buf->start;
2950         else
2951                 parent = 0;
2952
2953         for (i = 0; i < nritems; i++) {
2954                 if (level == 0) {
2955                         btrfs_item_key_to_cpu(buf, &key, i);
2956                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2957                                 continue;
2958                         fi = btrfs_item_ptr(buf, i,
2959                                             struct btrfs_file_extent_item);
2960                         if (btrfs_file_extent_type(buf, fi) ==
2961                             BTRFS_FILE_EXTENT_INLINE)
2962                                 continue;
2963                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2964                         if (bytenr == 0)
2965                                 continue;
2966
2967                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2968                         key.offset -= btrfs_file_extent_offset(buf, fi);
2969                         ret = process_func(trans, root, bytenr, num_bytes,
2970                                            parent, ref_root, key.objectid,
2971                                            key.offset, for_cow);
2972                         if (ret)
2973                                 goto fail;
2974                 } else {
2975                         bytenr = btrfs_node_blockptr(buf, i);
2976                         num_bytes = btrfs_level_size(root, level - 1);
2977                         ret = process_func(trans, root, bytenr, num_bytes,
2978                                            parent, ref_root, level - 1, 0,
2979                                            for_cow);
2980                         if (ret)
2981                                 goto fail;
2982                 }
2983         }
2984         return 0;
2985 fail:
2986         return ret;
2987 }
2988
2989 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2990                   struct extent_buffer *buf, int full_backref, int for_cow)
2991 {
2992         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2993 }
2994
2995 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2996                   struct extent_buffer *buf, int full_backref, int for_cow)
2997 {
2998         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2999 }
3000
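/*
 * write_one_cache_group - copy the in-memory block group item for @cache
 * back into its slot in the extent tree.  A failure here is fatal to the
 * transaction, so it is aborted before the error is returned.
 */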
3001 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3002                                  struct btrfs_root *root,
3003                                  struct btrfs_path *path,
3004                                  struct btrfs_block_group_cache *cache)
3005 {
3006         int ret;
3007         struct btrfs_root *extent_root = root->fs_info->extent_root;
3008         unsigned long bi;
3009         struct extent_buffer *leaf;
3010
3011         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3012         if (ret < 0)
3013                 goto fail;
3014         BUG_ON(ret); /* Corruption */
3015
3016         leaf = path->nodes[0];
3017         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3018         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3019         btrfs_mark_buffer_dirty(leaf);
3020         btrfs_release_path(path);
3021 fail:
3022         if (ret) {
3023                 btrfs_abort_transaction(trans, root, ret);
3024                 return ret;
3025         }
3026         return 0;
3027
3028 }
3029
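/*
 * next_block_group - advance to the next block group in the rbtree,
 * dropping the caller's reference on @cache and taking one on its
 * successor, or returning NULL at the end of the tree.
 */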
3030 static struct btrfs_block_group_cache *
3031 next_block_group(struct btrfs_root *root,
3032                  struct btrfs_block_group_cache *cache)
3033 {
3034         struct rb_node *node;
3035         spin_lock(&root->fs_info->block_group_cache_lock);
3036         node = rb_next(&cache->cache_node);
3037         btrfs_put_block_group(cache);
3038         if (node) {
3039                 cache = rb_entry(node, struct btrfs_block_group_cache,
3040                                  cache_node);
3041                 btrfs_get_block_group(cache);
3042         } else
3043                 cache = NULL;
3044         spin_unlock(&root->fs_info->block_group_cache_lock);
3045         return cache;
3046 }
3047
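/*
 * cache_save_setup - get the free space cache inode for @block_group
 * ready to be written at commit time: create the inode if necessary,
 * truncate stale contents, and preallocate room for the new cache
 * (16 pages per 256MB of block group).  The outcome is recorded in
 * block_group->disk_cache_state.
 */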
3048 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3049                             struct btrfs_trans_handle *trans,
3050                             struct btrfs_path *path)
3051 {
3052         struct btrfs_root *root = block_group->fs_info->tree_root;
3053         struct inode *inode = NULL;
3054         u64 alloc_hint = 0;
3055         int dcs = BTRFS_DC_ERROR;
3056         int num_pages = 0;
3057         int retries = 0;
3058         int ret = 0;
3059
3060         /*
3061          * If this block group is smaller than 100 megs don't bother caching the
3062          * block group.
3063          */
3064         if (block_group->key.offset < (100 * 1024 * 1024)) {
3065                 spin_lock(&block_group->lock);
3066                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3067                 spin_unlock(&block_group->lock);
3068                 return 0;
3069         }
3070
3071 again:
3072         inode = lookup_free_space_inode(root, block_group, path);
3073         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3074                 ret = PTR_ERR(inode);
3075                 btrfs_release_path(path);
3076                 goto out;
3077         }
3078
3079         if (IS_ERR(inode)) {
3080                 BUG_ON(retries);
3081                 retries++;
3082
3083                 if (block_group->ro)
3084                         goto out_free;
3085
3086                 ret = create_free_space_inode(root, trans, block_group, path);
3087                 if (ret)
3088                         goto out_free;
3089                 goto again;
3090         }
3091
3092         /* We've already set up this transaction, go ahead and exit */
3093         if (block_group->cache_generation == trans->transid &&
3094             i_size_read(inode)) {
3095                 dcs = BTRFS_DC_SETUP;
3096                 goto out_put;
3097         }
3098
3099         /*
3100          * We want to set the generation to 0 so that if anything goes wrong
3101          * from here on out we know not to trust this cache the next time we
3102          * load it.
3103          */
3104         BTRFS_I(inode)->generation = 0;
3105         ret = btrfs_update_inode(trans, root, inode);
3106         WARN_ON(ret);
3107
3108         if (i_size_read(inode) > 0) {
3109                 ret = btrfs_truncate_free_space_cache(root, trans, path,
3110                                                       inode);
3111                 if (ret)
3112                         goto out_put;
3113         }
3114
3115         spin_lock(&block_group->lock);
3116         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3117             !btrfs_test_opt(root, SPACE_CACHE)) {
3118                 /*
3119                  * Don't bother trying to write stuff out if
3120                  * a) we're not cached, or
3121                  * b) we were mounted with the nospace_cache option.
3122                  */
3123                 dcs = BTRFS_DC_WRITTEN;
3124                 spin_unlock(&block_group->lock);
3125                 goto out_put;
3126         }
3127         spin_unlock(&block_group->lock);
3128
3129         /*
3130          * Try to preallocate enough space based on how big the block group is.
3131          * Keep in mind this has to include any pinned space which could end up
3132          * taking up quite a bit since it's not folded into the other space
3133          * cache.
3134          */
3135         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3136         if (!num_pages)
3137                 num_pages = 1;
3138
3139         num_pages *= 16;
3140         num_pages *= PAGE_CACHE_SIZE;
3141
3142         ret = btrfs_check_data_free_space(inode, num_pages);
3143         if (ret)
3144                 goto out_put;
3145
3146         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3147                                               num_pages, num_pages,
3148                                               &alloc_hint);
3149         if (!ret)
3150                 dcs = BTRFS_DC_SETUP;
3151         btrfs_free_reserved_data_space(inode, num_pages);
3152
3153 out_put:
3154         iput(inode);
3155 out_free:
3156         btrfs_release_path(path);
3157 out:
3158         spin_lock(&block_group->lock);
3159         if (!ret && dcs == BTRFS_DC_SETUP)
3160                 block_group->cache_generation = trans->transid;
3161         block_group->disk_cache_state = dcs;
3162         spin_unlock(&block_group->lock);
3163
3164         return ret;
3165 }
3166
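/*
 * btrfs_write_dirty_block_groups - three passes over the block groups:
 * set up free space cache inodes for any group still in BTRFS_DC_CLEAR,
 * then write out every dirty block group item (running delayed refs so
 * the items are current), and finally write out the free space caches
 * that were marked BTRFS_DC_NEED_WRITE.
 */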
3167 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3168                                    struct btrfs_root *root)
3169 {
3170         struct btrfs_block_group_cache *cache;
3171         int err = 0;
3172         struct btrfs_path *path;
3173         u64 last = 0;
3174
3175         path = btrfs_alloc_path();
3176         if (!path)
3177                 return -ENOMEM;
3178
3179 again:
3180         while (1) {
3181                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3182                 while (cache) {
3183                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3184                                 break;
3185                         cache = next_block_group(root, cache);
3186                 }
3187                 if (!cache) {
3188                         if (last == 0)
3189                                 break;
3190                         last = 0;
3191                         continue;
3192                 }
3193                 err = cache_save_setup(cache, trans, path);
3194                 last = cache->key.objectid + cache->key.offset;
3195                 btrfs_put_block_group(cache);
3196         }
3197
3198         while (1) {
3199                 if (last == 0) {
3200                         err = btrfs_run_delayed_refs(trans, root,
3201                                                      (unsigned long)-1);
3202                         if (err) /* File system offline */
3203                                 goto out;
3204                 }
3205
3206                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3207                 while (cache) {
3208                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3209                                 btrfs_put_block_group(cache);
3210                                 goto again;
3211                         }
3212
3213                         if (cache->dirty)
3214                                 break;
3215                         cache = next_block_group(root, cache);
3216                 }
3217                 if (!cache) {
3218                         if (last == 0)
3219                                 break;
3220                         last = 0;
3221                         continue;
3222                 }
3223
3224                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3225                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3226                 cache->dirty = 0;
3227                 last = cache->key.objectid + cache->key.offset;
3228
3229                 err = write_one_cache_group(trans, root, path, cache);
3230                 if (err) /* File system offline */
3231                         goto out;
3232
3233                 btrfs_put_block_group(cache);
3234         }
3235
3236         while (1) {
3237                 /*
3238                  * I don't think this is needed since we're just marking our
3239                  * preallocated extent as written, but it can't hurt to be
3240                  * safe.
3241                  */
3242                 if (last == 0) {
3243                         err = btrfs_run_delayed_refs(trans, root,
3244                                                      (unsigned long)-1);
3245                         if (err) /* File system offline */
3246                                 goto out;
3247                 }
3248
3249                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3250                 while (cache) {
3251                         /*
3252                          * Really this shouldn't happen, but it could if we
3253                          * couldn't write the entire preallocated extent and
3254                          * splitting the extent resulted in a new block.
3255                          */
3256                         if (cache->dirty) {
3257                                 btrfs_put_block_group(cache);
3258                                 goto again;
3259                         }
3260                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3261                                 break;
3262                         cache = next_block_group(root, cache);
3263                 }
3264                 if (!cache) {
3265                         if (last == 0)
3266                                 break;
3267                         last = 0;
3268                         continue;
3269                 }
3270
3271                 err = btrfs_write_out_cache(root, trans, cache, path);
3272
3273                 /*
3274                  * If we didn't have an error then the cache state is still
3275                  * NEED_WRITE, so we can set it to WRITTEN.
3276                  */
3277                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3278                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3279                 last = cache->key.objectid + cache->key.offset;
3280                 btrfs_put_block_group(cache);
3281         }
3282 out:
3283
3284         btrfs_free_path(path);
3285         return err;
3286 }
3287
3288 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3289 {
3290         struct btrfs_block_group_cache *block_group;
3291         int readonly = 0;
3292
3293         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3294         if (!block_group || block_group->ro)
3295                 readonly = 1;
3296         if (block_group)
3297                 btrfs_put_block_group(block_group);
3298         return readonly;
3299 }
3300
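/*
 * update_space_info - account @total_bytes/@bytes_used to the space_info
 * matching @flags, creating and registering it on first use.  The disk_*
 * counters are scaled by a factor of two for DUP/RAID1/RAID10 since those
 * profiles keep two copies of everything on disk.
 */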
3301 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3302                              u64 total_bytes, u64 bytes_used,
3303                              struct btrfs_space_info **space_info)
3304 {
3305         struct btrfs_space_info *found;
3306         int i;
3307         int factor;
3308
3309         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3310                      BTRFS_BLOCK_GROUP_RAID10))
3311                 factor = 2;
3312         else
3313                 factor = 1;
3314
3315         found = __find_space_info(info, flags);
3316         if (found) {
3317                 spin_lock(&found->lock);
3318                 found->total_bytes += total_bytes;
3319                 found->disk_total += total_bytes * factor;
3320                 found->bytes_used += bytes_used;
3321                 found->disk_used += bytes_used * factor;
3322                 found->full = 0;
3323                 spin_unlock(&found->lock);
3324                 *space_info = found;
3325                 return 0;
3326         }
3327         found = kzalloc(sizeof(*found), GFP_NOFS);
3328         if (!found)
3329                 return -ENOMEM;
3330
3331         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3332                 INIT_LIST_HEAD(&found->block_groups[i]);
3333         init_rwsem(&found->groups_sem);
3334         spin_lock_init(&found->lock);
3335         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3336         found->total_bytes = total_bytes;
3337         found->disk_total = total_bytes * factor;
3338         found->bytes_used = bytes_used;
3339         found->disk_used = bytes_used * factor;
3340         found->bytes_pinned = 0;
3341         found->bytes_reserved = 0;
3342         found->bytes_readonly = 0;
3343         found->bytes_may_use = 0;
3344         found->full = 0;
3345         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3346         found->chunk_alloc = 0;
3347         found->flush = 0;
3348         init_waitqueue_head(&found->wait);
3349         *space_info = found;
3350         list_add_rcu(&found->list, &info->space_info);
3351         if (flags & BTRFS_BLOCK_GROUP_DATA)
3352                 info->data_sinfo = found;
3353         return 0;
3354 }
3355
3356 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3357 {
3358         u64 extra_flags = chunk_to_extended(flags) &
3359                                 BTRFS_EXTENDED_PROFILE_MASK;
3360
3361         write_seqlock(&fs_info->profiles_lock);
3362         if (flags & BTRFS_BLOCK_GROUP_DATA)
3363                 fs_info->avail_data_alloc_bits |= extra_flags;
3364         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3365                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3366         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3367                 fs_info->avail_system_alloc_bits |= extra_flags;
3368         write_sequnlock(&fs_info->profiles_lock);
3369 }
3370
3371 /*
3372  * returns target flags in extended format or 0 if restripe for this
3373  * chunk_type is not in progress
3374  *
3375  * should be called with either volume_mutex or balance_lock held
3376  */
3377 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3378 {
3379         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3380         u64 target = 0;
3381
3382         if (!bctl)
3383                 return 0;
3384
3385         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3386             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3387                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3388         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3389                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3390                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3391         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3392                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3393                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3394         }
3395
3396         return target;
3397 }
3398
3399 /*
3400  * @flags: available profiles in extended format (see ctree.h)
3401  *
3402  * Returns reduced profile in chunk format.  If profile changing is in
3403  * progress (either running or paused) picks the target profile (if it's
3404  * already available), otherwise falls back to plain reducing.
3405  */
3406 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3407 {
3408         /*
3409          * we add in the count of missing devices because we want
3410          * to make sure that any RAID levels on a degraded FS
3411          * continue to be honored.
3412          */
3413         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3414                 root->fs_info->fs_devices->missing_devices;
3415         u64 target;
3416         u64 tmp;
3417
3418         /*
3419          * see if restripe for this chunk_type is in progress, if so
3420          * try to reduce to the target profile
3421          */
3422         spin_lock(&root->fs_info->balance_lock);
3423         target = get_restripe_target(root->fs_info, flags);
3424         if (target) {
3425                 /* pick target profile only if it's already available */
3426                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3427                         spin_unlock(&root->fs_info->balance_lock);
3428                         return extended_to_chunk(target);
3429                 }
3430         }
3431         spin_unlock(&root->fs_info->balance_lock);
3432
3433         /* First, mask out the RAID levels which aren't possible */
3434         if (num_devices == 1)
3435                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3436                            BTRFS_BLOCK_GROUP_RAID5);
3437         if (num_devices < 3)
3438                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3439         if (num_devices < 4)
3440                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3441
3442         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3443                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3444                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3445         flags &= ~tmp;
3446
3447         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3448                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3449         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3450                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3451         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3452                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3453         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3454                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3455         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3456                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3457
3458         return extended_to_chunk(flags | tmp);
3459 }
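
/*
 * Worked example (illustrative): on a two-device filesystem with no
 * missing devices and flags = DATA|RAID0|RAID1|RAID6, the device-count
 * checks mask out RAID6 (which needs at least three devices), and RAID1
 * takes priority over RAID0, so this returns DATA|RAID1 in chunk format.
 */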
3460
3461 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3462 {
3463         unsigned seq;
3464
3465         do {
3466                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3467
3468                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3469                         flags |= root->fs_info->avail_data_alloc_bits;
3470                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3471                         flags |= root->fs_info->avail_system_alloc_bits;
3472                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3473                         flags |= root->fs_info->avail_metadata_alloc_bits;
3474         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3475
3476         return btrfs_reduce_alloc_profile(root, flags);
3477 }
3478
3479 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3480 {
3481         u64 flags;
3482         u64 ret;
3483
3484         if (data)
3485                 flags = BTRFS_BLOCK_GROUP_DATA;
3486         else if (root == root->fs_info->chunk_root)
3487                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3488         else
3489                 flags = BTRFS_BLOCK_GROUP_METADATA;
3490
3491         ret = get_alloc_profile(root, flags);
3492         return ret;
3493 }
3494
3495 /*
3496  * This will check the space that the inode allocates from to make sure we have
3497  * enough space for bytes.
3498  */
3499 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3500 {
3501         struct btrfs_space_info *data_sinfo;
3502         struct btrfs_root *root = BTRFS_I(inode)->root;
3503         struct btrfs_fs_info *fs_info = root->fs_info;
3504         u64 used;
3505         int ret = 0, committed = 0, alloc_chunk = 1;
3506
3507         /* make sure bytes are sectorsize aligned */
3508         bytes = ALIGN(bytes, root->sectorsize);
3509
3510         if (root == root->fs_info->tree_root ||
3511             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3512                 alloc_chunk = 0;
3513                 committed = 1;
3514         }
3515
3516         data_sinfo = fs_info->data_sinfo;
3517         if (!data_sinfo)
3518                 goto alloc;
3519
3520 again:
3521         /* make sure we have enough space to handle the data first */
3522         spin_lock(&data_sinfo->lock);
3523         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3524                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3525                 data_sinfo->bytes_may_use;
3526
3527         if (used + bytes > data_sinfo->total_bytes) {
3528                 struct btrfs_trans_handle *trans;
3529
3530                 /*
3531                  * if we don't have enough free bytes in this space then we need
3532                  * to alloc a new chunk.
3533                  */
3534                 if (!data_sinfo->full && alloc_chunk) {
3535                         u64 alloc_target;
3536
3537                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3538                         spin_unlock(&data_sinfo->lock);
3539 alloc:
3540                         alloc_target = btrfs_get_alloc_profile(root, 1);
3541                         trans = btrfs_join_transaction(root);
3542                         if (IS_ERR(trans))
3543                                 return PTR_ERR(trans);
3544
3545                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3546                                              alloc_target,
3547                                              CHUNK_ALLOC_NO_FORCE);
3548                         btrfs_end_transaction(trans, root);
3549                         if (ret < 0) {
3550                                 if (ret != -ENOSPC)
3551                                         return ret;
3552                                 else
3553                                         goto commit_trans;
3554                         }
3555
3556                         if (!data_sinfo)
3557                                 data_sinfo = fs_info->data_sinfo;
3558
3559                         goto again;
3560                 }
3561
3562                 /*
3563                  * If we have less pinned bytes than we want to allocate then
3564                  * don't bother committing the transaction, it won't help us.
3565                  */
3566                 if (data_sinfo->bytes_pinned < bytes)
3567                         committed = 1;
3568                 spin_unlock(&data_sinfo->lock);
3569
3570                 /* commit the current transaction and try again */
3571 commit_trans:
3572                 if (!committed &&
3573                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3574                         committed = 1;
3575                         trans = btrfs_join_transaction(root);
3576                         if (IS_ERR(trans))
3577                                 return PTR_ERR(trans);
3578                         ret = btrfs_commit_transaction(trans, root);
3579                         if (ret)
3580                                 return ret;
3581                         goto again;
3582                 }
3583
3584                 return -ENOSPC;
3585         }
3586         data_sinfo->bytes_may_use += bytes;
3587         trace_btrfs_space_reservation(root->fs_info, "space_info",
3588                                       data_sinfo->flags, bytes, 1);
3589         spin_unlock(&data_sinfo->lock);
3590
3591         return 0;
3592 }
3593
3594 /*
3595  * Called if we need to clear a data reservation for this inode.
3596  */
3597 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3598 {
3599         struct btrfs_root *root = BTRFS_I(inode)->root;
3600         struct btrfs_space_info *data_sinfo;
3601
3602         /* make sure bytes are sectorsize aligned */
3603         bytes = ALIGN(bytes, root->sectorsize);
3604
3605         data_sinfo = root->fs_info->data_sinfo;
3606         spin_lock(&data_sinfo->lock);
3607         data_sinfo->bytes_may_use -= bytes;
3608         trace_btrfs_space_reservation(root->fs_info, "space_info",
3609                                       data_sinfo->flags, bytes, 0);
3610         spin_unlock(&data_sinfo->lock);
3611 }
3612
3613 static void force_metadata_allocation(struct btrfs_fs_info *info)
3614 {
3615         struct list_head *head = &info->space_info;
3616         struct btrfs_space_info *found;
3617
3618         rcu_read_lock();
3619         list_for_each_entry_rcu(found, head, list) {
3620                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3621                         found->force_alloc = CHUNK_ALLOC_FORCE;
3622         }
3623         rcu_read_unlock();
3624 }
3625
3626 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3627 {
3628         return (global->size << 1);
3629 }
3630
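/*
 * should_alloc_chunk - CHUNK_ALLOC_FORCE always allocates;
 * CHUNK_ALLOC_LIMITED allocates while free space in this space_info is
 * below about 1% of the filesystem size (at least 64MB); otherwise a new
 * chunk is allocated only once roughly 80% of the existing space is in
 * use (counting the global reserve against metadata).
 */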
3631 static int should_alloc_chunk(struct btrfs_root *root,
3632                               struct btrfs_space_info *sinfo, int force)
3633 {
3634         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3635         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3636         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3637         u64 thresh;
3638
3639         if (force == CHUNK_ALLOC_FORCE)
3640                 return 1;
3641
3642         /*
3643          * We need to take into account the global rsv because for all intents
3644          * and purposes it's used space.  Don't worry about locking the
3645          * global_rsv, it doesn't change except when the transaction commits.
3646          */
3647         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3648                 num_allocated += calc_global_rsv_need_space(global_rsv);
3649
3650         /*
3651          * in limited mode, we want to have some free space up to
3652          * about 1% of the FS size.
3653          */
3654         if (force == CHUNK_ALLOC_LIMITED) {
3655                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3656                 thresh = max_t(u64, 64 * 1024 * 1024,
3657                                div_factor_fine(thresh, 1));
3658
3659                 if (num_bytes - num_allocated < thresh)
3660                         return 1;
3661         }
3662
3663         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3664                 return 0;
3665         return 1;
3666 }
3667
3668 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3669 {
3670         u64 num_dev;
3671
3672         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3673                     BTRFS_BLOCK_GROUP_RAID0 |
3674                     BTRFS_BLOCK_GROUP_RAID5 |
3675                     BTRFS_BLOCK_GROUP_RAID6))
3676                 num_dev = root->fs_info->fs_devices->rw_devices;
3677         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3678                 num_dev = 2;
3679         else
3680                 num_dev = 1;    /* DUP or single */
3681
3682         /* metadata for updating devices and chunk tree */
3683         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3684 }
3685
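/*
 * check_system_chunk - make sure the SYSTEM space_info has room for the
 * chunk tree and device updates that allocating a chunk of @type will
 * generate; if not, allocate another SYSTEM chunk up front.
 */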
3686 static void check_system_chunk(struct btrfs_trans_handle *trans,
3687                                struct btrfs_root *root, u64 type)
3688 {
3689         struct btrfs_space_info *info;
3690         u64 left;
3691         u64 thresh;
3692
3693         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3694         spin_lock(&info->lock);
3695         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3696                 info->bytes_reserved - info->bytes_readonly;
3697         spin_unlock(&info->lock);
3698
3699         thresh = get_system_chunk_thresh(root, type);
3700         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3701                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3702                         left, thresh, type);
3703                 dump_space_info(info, 0, 0);
3704         }
3705
3706         if (left < thresh) {
3707                 u64 flags;
3708
3709                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3710                 btrfs_alloc_chunk(trans, root, flags);
3711         }
3712 }
3713
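/*
 * do_chunk_alloc - allocate a new chunk for @flags if the space_info and
 * the @force policy (see CHUNK_ALLOC_*) say we should.  Only one
 * allocation runs at a time per space_info; a concurrent caller waits on
 * chunk_mutex and then rechecks.  Returns 1 if a chunk was allocated,
 * 0 if none was needed, and a negative errno on failure.
 */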
3714 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3715                           struct btrfs_root *extent_root, u64 flags, int force)
3716 {
3717         struct btrfs_space_info *space_info;
3718         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3719         int wait_for_alloc = 0;
3720         int ret = 0;
3721
3722         /* Don't re-enter if we're already allocating a chunk */
3723         if (trans->allocating_chunk)
3724                 return -ENOSPC;
3725
3726         space_info = __find_space_info(extent_root->fs_info, flags);
3727         if (!space_info) {
3728                 ret = update_space_info(extent_root->fs_info, flags,
3729                                         0, 0, &space_info);
3730                 BUG_ON(ret); /* -ENOMEM */
3731         }
3732         BUG_ON(!space_info); /* Logic error */
3733
3734 again:
3735         spin_lock(&space_info->lock);
3736         if (force < space_info->force_alloc)
3737                 force = space_info->force_alloc;
3738         if (space_info->full) {
3739                 spin_unlock(&space_info->lock);
3740                 return 0;
3741         }
3742
3743         if (!should_alloc_chunk(extent_root, space_info, force)) {
3744                 spin_unlock(&space_info->lock);
3745                 return 0;
3746         } else if (space_info->chunk_alloc) {
3747                 wait_for_alloc = 1;
3748         } else {
3749                 space_info->chunk_alloc = 1;
3750         }
3751
3752         spin_unlock(&space_info->lock);
3753
3754         mutex_lock(&fs_info->chunk_mutex);
3755
3756         /*
3757          * The chunk_mutex is held throughout the entirety of a chunk
3758          * allocation, so once we've acquired the chunk_mutex we know that the
3759          * other guy is done and we need to recheck and see if we should
3760          * allocate.
3761          */
3762         if (wait_for_alloc) {
3763                 mutex_unlock(&fs_info->chunk_mutex);
3764                 wait_for_alloc = 0;
3765                 goto again;
3766         }
3767
3768         trans->allocating_chunk = true;
3769
3770         /*
3771          * If we have mixed data/metadata chunks we want to make sure we keep
3772          * allocating mixed chunks instead of individual chunks.
3773          */
3774         if (btrfs_mixed_space_info(space_info))
3775                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3776
3777         /*
3778          * if we're doing a data chunk, go ahead and make sure that
3779          * we keep a reasonable number of metadata chunks allocated in the
3780          * FS as well.
3781          */
3782         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3783                 fs_info->data_chunk_allocations++;
3784                 if (!(fs_info->data_chunk_allocations %
3785                       fs_info->metadata_ratio))
3786                         force_metadata_allocation(fs_info);
3787         }
3788
3789         /*
3790          * Check if we have enough space in SYSTEM chunk because we may need
3791          * to update devices.
3792          */
3793         check_system_chunk(trans, extent_root, flags);
3794
3795         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3796         trans->allocating_chunk = false;
3797
3798         spin_lock(&space_info->lock);
3799         if (ret < 0 && ret != -ENOSPC)
3800                 goto out;
3801         if (ret)
3802                 space_info->full = 1;
3803         else
3804                 ret = 1;
3805
3806         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3807 out:
3808         space_info->chunk_alloc = 0;
3809         spin_unlock(&space_info->lock);
3810         mutex_unlock(&fs_info->chunk_mutex);
3811         return ret;
3812 }
3813
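/*
 * can_overcommit - decide whether a @bytes metadata reservation may
 * exceed what is actually allocated.  We refuse outright if used space
 * plus twice the global reserve already reaches the space_info's total;
 * otherwise we allow overcommitting by up to half (no/limited flushing)
 * or an eighth (BTRFS_RESERVE_FLUSH_ALL) of the space, capped by the
 * unallocated chunk space that could still back it.
 */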
3814 static int can_overcommit(struct btrfs_root *root,
3815                           struct btrfs_space_info *space_info, u64 bytes,
3816                           enum btrfs_reserve_flush_enum flush)
3817 {
3818         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3819         u64 profile = btrfs_get_alloc_profile(root, 0);
3820         u64 space_size;
3821         u64 avail;
3822         u64 used;
3823         u64 to_add;
3824
3825         used = space_info->bytes_used + space_info->bytes_reserved +
3826                 space_info->bytes_pinned + space_info->bytes_readonly;
3827
3828         /*
3829          * We only want to allow over committing if we have lots of actual space
3830          * free, but if we don't have enough space to handle the global reserve
3831          * space then we could end up having a real enospc problem when trying
3832          * to allocate a chunk or some other such important allocation.
3833          */
3834         spin_lock(&global_rsv->lock);
3835         space_size = calc_global_rsv_need_space(global_rsv);
3836         spin_unlock(&global_rsv->lock);
3837         if (used + space_size >= space_info->total_bytes)
3838                 return 0;
3839
3840         used += space_info->bytes_may_use;
3841
3842         spin_lock(&root->fs_info->free_chunk_lock);
3843         avail = root->fs_info->free_chunk_space;
3844         spin_unlock(&root->fs_info->free_chunk_lock);
3845
3846         /*
3847          * If we have dup, raid1 or raid10 then only half of the free
3848          * space is actually usable.  For raid56, the space info used
3849          * doesn't include the parity drive, so we don't have to
3850          * change the math.
3851          */
3852         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3853                        BTRFS_BLOCK_GROUP_RAID1 |
3854                        BTRFS_BLOCK_GROUP_RAID10))
3855                 avail >>= 1;
3856
3857         to_add = space_info->total_bytes;
3858
3859         /*
3860          * If we aren't flushing all things, let us overcommit up to
3861          * half of the space.  If we can flush, don't let us overcommit
3862          * too much; only allow overcommit of up to 1/8 of the space.
3863          */
3864         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3865                 to_add >>= 3;
3866         else
3867                 to_add >>= 1;
3868
3869         /*
3870          * Limit the overcommit to the amount of free space we could possibly
3871          * allocate for chunks.
3872          */
3873         to_add = min(avail, to_add);
3874
3875         if (used + bytes < space_info->total_bytes + to_add)
3876                 return 1;
3877         return 0;
3878 }
3879
3880 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3881                                          unsigned long nr_pages)
3882 {
3883         struct super_block *sb = root->fs_info->sb;
3884         int started;
3885
3886         /* If we cannot start writeback, just sync all the delalloc files. */
3887         started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
3888                                                       WB_REASON_FS_FREE_SPACE);
3889         if (!started) {
3890                 /*
3891                  * We needn't worry about the filesystem going from r/w to
3892                  * r/o even though we don't acquire the ->s_umount mutex,
3893                  * because the filesystem should guarantee that the delalloc
3894                  * inode list is empty once the filesystem is read-only (all
3895                  * dirty pages have been written to disk).
3896                  */
3897                 btrfs_start_delalloc_inodes(root, 0);
3898                 if (!current->journal_info)
3899                         btrfs_wait_ordered_extents(root, 0);
3900         }
3901 }
3902
3903 /*
3904  * shrink metadata reservation for delalloc
3905  */
3906 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3907                             bool wait_ordered)
3908 {
3909         struct btrfs_block_rsv *block_rsv;
3910         struct btrfs_space_info *space_info;
3911         struct btrfs_trans_handle *trans;
3912         u64 delalloc_bytes;
3913         u64 max_reclaim;
3914         long time_left;
3915         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3916         int loops = 0;
3917         enum btrfs_reserve_flush_enum flush;
3918
3919         trans = (struct btrfs_trans_handle *)current->journal_info;
3920         block_rsv = &root->fs_info->delalloc_block_rsv;
3921         space_info = block_rsv->space_info;
3922
3923         smp_mb();
3924         delalloc_bytes = percpu_counter_sum_positive(
3925                                                 &root->fs_info->delalloc_bytes);
3926         if (delalloc_bytes == 0) {
3927                 if (trans)
3928                         return;
3929                 btrfs_wait_ordered_extents(root, 0);
3930                 return;
3931         }
3932
3933         while (delalloc_bytes && loops < 3) {
3934                 max_reclaim = min(delalloc_bytes, to_reclaim);
3935                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3936                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
3937                 /*
3938                  * We need to wait for the async pages to actually start before
3939                  * we do anything.
3940                  */
3941                 wait_event(root->fs_info->async_submit_wait,
3942                            !atomic_read(&root->fs_info->async_delalloc_pages));
3943
3944                 if (!trans)
3945                         flush = BTRFS_RESERVE_FLUSH_ALL;
3946                 else
3947                         flush = BTRFS_RESERVE_NO_FLUSH;
3948                 spin_lock(&space_info->lock);
3949                 if (can_overcommit(root, space_info, orig, flush)) {
3950                         spin_unlock(&space_info->lock);
3951                         break;
3952                 }
3953                 spin_unlock(&space_info->lock);
3954
3955                 loops++;
3956                 if (wait_ordered && !trans) {
3957                         btrfs_wait_ordered_extents(root, 0);
3958                 } else {
3959                         time_left = schedule_timeout_killable(1);
3960                         if (time_left)
3961                                 break;
3962                 }
3963                 smp_mb();
3964                 delalloc_bytes = percpu_counter_sum_positive(
3965                                                 &root->fs_info->delalloc_bytes);
3966         }
3967 }
3968
3969 /**
3970  * may_commit_transaction - possibly commit the transaction if it's OK to
3971  * @root - the root we're allocating for
3972  * @bytes - the number of bytes we want to reserve
3973  * @force - force the commit
3974  *
3975  * This will check to make sure that committing the transaction will actually
3976  * get us somewhere and then commit the transaction if it does.  Otherwise it
3977  * will return -ENOSPC.
3978  */
3979 static int may_commit_transaction(struct btrfs_root *root,
3980                                   struct btrfs_space_info *space_info,
3981                                   u64 bytes, int force)
3982 {
3983         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3984         struct btrfs_trans_handle *trans;
3985
3986         trans = (struct btrfs_trans_handle *)current->journal_info;
3987         if (trans)
3988                 return -EAGAIN;
3989
3990         if (force)
3991                 goto commit;
3992
3993         /* See if there is enough pinned space to make this reservation */
3994         spin_lock(&space_info->lock);
3995         if (space_info->bytes_pinned >= bytes) {
3996                 spin_unlock(&space_info->lock);
3997                 goto commit;
3998         }
3999         spin_unlock(&space_info->lock);
4000
4001         /*
4002          * See if there is some space in the delayed insertion reservation for
4003          * this reservation.
4004          */
4005         if (space_info != delayed_rsv->space_info)
4006                 return -ENOSPC;
4007
4008         spin_lock(&space_info->lock);
4009         spin_lock(&delayed_rsv->lock);
4010         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
4011                 spin_unlock(&delayed_rsv->lock);
4012                 spin_unlock(&space_info->lock);
4013                 return -ENOSPC;
4014         }
4015         spin_unlock(&delayed_rsv->lock);
4016         spin_unlock(&space_info->lock);
4017
4018 commit:
4019         trans = btrfs_join_transaction(root);
4020         if (IS_ERR(trans))
4021                 return -ENOSPC;
4022
4023         return btrfs_commit_transaction(trans, root);
4024 }
4025
4026 enum flush_state {
4027         FLUSH_DELAYED_ITEMS_NR  =       1,
4028         FLUSH_DELAYED_ITEMS     =       2,
4029         FLUSH_DELALLOC          =       3,
4030         FLUSH_DELALLOC_WAIT     =       4,
4031         ALLOC_CHUNK             =       5,
4032         COMMIT_TRANS            =       6,
4033 };
4034
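/*
 * flush_space - run one reclaim step from the enum above.  Callers
 * escalate through the states in order of increasing cost: push out
 * some, then all, delayed items; flush delalloc (optionally waiting on
 * ordered extents); try to allocate a new chunk; and only as a last
 * resort commit the transaction.
 */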
4035 static int flush_space(struct btrfs_root *root,
4036                        struct btrfs_space_info *space_info, u64 num_bytes,
4037                        u64 orig_bytes, int state)
4038 {
4039         struct btrfs_trans_handle *trans;
4040         int nr;
4041         int ret = 0;
4042
4043         switch (state) {
4044         case FLUSH_DELAYED_ITEMS_NR:
4045         case FLUSH_DELAYED_ITEMS:
4046                 if (state == FLUSH_DELAYED_ITEMS_NR) {
4047                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
4048
4049                         nr = (int)div64_u64(num_bytes, bytes);
4050                         if (!nr)
4051                                 nr = 1;
4052                         nr *= 2;
4053                 } else {
4054                         nr = -1;
4055                 }
4056                 trans = btrfs_join_transaction(root);
4057                 if (IS_ERR(trans)) {
4058                         ret = PTR_ERR(trans);
4059                         break;
4060                 }
4061                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4062                 btrfs_end_transaction(trans, root);
4063                 break;
4064         case FLUSH_DELALLOC:
4065         case FLUSH_DELALLOC_WAIT:
4066                 shrink_delalloc(root, num_bytes, orig_bytes,
4067                                 state == FLUSH_DELALLOC_WAIT);
4068                 break;
4069         case ALLOC_CHUNK:
4070                 trans = btrfs_join_transaction(root);
4071                 if (IS_ERR(trans)) {
4072                         ret = PTR_ERR(trans);
4073                         break;
4074                 }
4075                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4076                                      btrfs_get_alloc_profile(root, 0),
4077                                      CHUNK_ALLOC_NO_FORCE);
4078                 btrfs_end_transaction(trans, root);
4079                 if (ret == -ENOSPC)
4080                         ret = 0;
4081                 break;
4082         case COMMIT_TRANS:
4083                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4084                 break;
4085         default:
4086                 ret = -ENOSPC;
4087                 break;
4088         }
4089
4090         return ret;
4091 }
4092 /**
4093  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4094  * @root: the root we're allocating for
4095  * @block_rsv: the block_rsv we're allocating for
4096  * @orig_bytes: the number of bytes we want
4097  * @flush: whether or not we can flush to make our reservation
4098  *
4099  * This will reserve orig_bytes number of bytes from the space info associated
4100  * with the block_rsv.  If there is not enough space it will make an attempt to
4101  * flush out space to make room.  It will do this by flushing delalloc if
4102  * possible or committing the transaction.  If flush is 0 then no attempts to
4103  * regain reservations will be made and this will fail if there is not enough
4104  * space already.
4105  */
4106 static int reserve_metadata_bytes(struct btrfs_root *root,
4107                                   struct btrfs_block_rsv *block_rsv,
4108                                   u64 orig_bytes,
4109                                   enum btrfs_reserve_flush_enum flush)
4110 {
4111         struct btrfs_space_info *space_info = block_rsv->space_info;
4112         u64 used;
4113         u64 num_bytes = orig_bytes;
4114         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4115         int ret = 0;
4116         bool flushing = false;
4117
4118 again:
4119         ret = 0;
4120         spin_lock(&space_info->lock);
4121         /*
4122          * We only want to wait if somebody other than us is flushing and we
4123          * are actually allowed to flush all things.
4124          */
4125         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4126                space_info->flush) {
4127                 spin_unlock(&space_info->lock);
4128                 /*
4129                  * If we have a trans handle we can't wait because the flusher
4130                  * may have to commit the transaction, which would mean we would
4131                  * deadlock since we are waiting for the flusher to finish, but
4132                  * hold the current transaction open.
4133                  */
4134                 if (current->journal_info)
4135                         return -EAGAIN;
4136                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4137                 /* Must have been killed, return */
4138                 if (ret)
4139                         return -EINTR;
4140
4141                 spin_lock(&space_info->lock);
4142         }
4143
4144         ret = -ENOSPC;
4145         used = space_info->bytes_used + space_info->bytes_reserved +
4146                 space_info->bytes_pinned + space_info->bytes_readonly +
4147                 space_info->bytes_may_use;
4148
4149         /*
4150          * The idea here is that if we haven't already over-reserved the block
4151          * group, we can go ahead and save our reservation first and then start
4152          * flushing if we need to.  Otherwise, if we've already overcommitted,
4153          * let's start flushing stuff first and then come back and try to make
4154          * our reservation.
4155          */
4156         if (used <= space_info->total_bytes) {
4157                 if (used + orig_bytes <= space_info->total_bytes) {
4158                         space_info->bytes_may_use += orig_bytes;
4159                         trace_btrfs_space_reservation(root->fs_info,
4160                                 "space_info", space_info->flags, orig_bytes, 1);
4161                         ret = 0;
4162                 } else {
4163                         /*
4164                          * Ok, set num_bytes to orig_bytes since we aren't
4165                          * overcommitted; this way we only try to reclaim what
4166                          * we need.
4167                          */
4168                         num_bytes = orig_bytes;
4169                 }
4170         } else {
4171                 /*
4172                  * Ok, we're overcommitted; set num_bytes to the overcommitted
4173                  * amount plus the amount of bytes that we need for this
4174                  * reservation.
4175                  */
4176                 num_bytes = used - space_info->total_bytes +
4177                         (orig_bytes * 2);
4178         }
4179
4180         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4181                 space_info->bytes_may_use += orig_bytes;
4182                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4183                                               space_info->flags, orig_bytes,
4184                                               1);
4185                 ret = 0;
4186         }
4187
4188         /*
4189          * Couldn't make our reservation, so save our place so that while
4190          * we're trying to reclaim space we can actually use it instead of
4191          * somebody else stealing it from us.
4192          *
4193          * We make the other tasks wait for the flush only when we can flush
4194          * all things.
4195          */
4196         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4197                 flushing = true;
4198                 space_info->flush = 1;
4199         }
4200
4201         spin_unlock(&space_info->lock);
4202
4203         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4204                 goto out;
4205
4206         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4207                           flush_state);
4208         flush_state++;
4209
4210         /*
4211          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4212          * would happen, so skip the delalloc flush.
4213          */
4214         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4215             (flush_state == FLUSH_DELALLOC ||
4216              flush_state == FLUSH_DELALLOC_WAIT))
4217                 flush_state = ALLOC_CHUNK;
4218
4219         if (!ret)
4220                 goto again;
4221         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4222                  flush_state < COMMIT_TRANS)
4223                 goto again;
4224         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4225                  flush_state <= COMMIT_TRANS)
4226                 goto again;
4227
4228 out:
4229         if (ret == -ENOSPC &&
4230             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4231                 struct btrfs_block_rsv *global_rsv =
4232                         &root->fs_info->global_block_rsv;
4233
4234                 if (block_rsv != global_rsv &&
4235                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4236                         ret = 0;
4237         }
4238         if (flushing) {
4239                 spin_lock(&space_info->lock);
4240                 space_info->flush = 0;
4241                 wake_up_all(&space_info->wait);
4242                 spin_unlock(&space_info->lock);
4243         }
4244         return ret;
4245 }
4246
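/*
 * Pick the block reservation to charge for an allocation: the
 * transaction's rsv for COW-enabled roots (and for the csum root while
 * csums are being added), falling back to the root's own rsv and
 * finally to the empty rsv.
 */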
4247 static struct btrfs_block_rsv *get_block_rsv(
4248                                         const struct btrfs_trans_handle *trans,
4249                                         const struct btrfs_root *root)
4250 {
4251         struct btrfs_block_rsv *block_rsv = NULL;
4252
4253         if (root->ref_cows)
4254                 block_rsv = trans->block_rsv;
4255
4256         if (root == root->fs_info->csum_root && trans->adding_csums)
4257                 block_rsv = trans->block_rsv;
4258
4259         if (!block_rsv)
4260                 block_rsv = root->block_rsv;
4261
4262         if (!block_rsv)
4263                 block_rsv = &root->fs_info->empty_block_rsv;
4264
4265         return block_rsv;
4266 }
4267
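/*
 * Consume num_bytes from a block reservation, failing with -ENOSPC if
 * the rsv does not currently hold that many reserved bytes.
 */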
4268 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4269                                u64 num_bytes)
4270 {
4271         int ret = -ENOSPC;
4272         spin_lock(&block_rsv->lock);
4273         if (block_rsv->reserved >= num_bytes) {
4274                 block_rsv->reserved -= num_bytes;
4275                 if (block_rsv->reserved < block_rsv->size)
4276                         block_rsv->full = 0;
4277                 ret = 0;
4278         }
4279         spin_unlock(&block_rsv->lock);
4280         return ret;
4281 }
4282
4283 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4284                                 u64 num_bytes, int update_size)
4285 {
4286         spin_lock(&block_rsv->lock);
4287         block_rsv->reserved += num_bytes;
4288         if (update_size)
4289                 block_rsv->size += num_bytes;
4290         else if (block_rsv->reserved >= block_rsv->size)
4291                 block_rsv->full = 1;
4292         spin_unlock(&block_rsv->lock);
4293 }
4294
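/*
 * Shrink a block reservation by num_bytes ((u64)-1 releases the whole
 * reservation).  Any excess reserved bytes are handed to @dest if it
 * still has room, and whatever is left over is returned to the
 * space_info's bytes_may_use.
 */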
4295 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4296                                     struct btrfs_block_rsv *block_rsv,
4297                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4298 {
4299         struct btrfs_space_info *space_info = block_rsv->space_info;
4300
4301         spin_lock(&block_rsv->lock);
4302         if (num_bytes == (u64)-1)
4303                 num_bytes = block_rsv->size;
4304         block_rsv->size -= num_bytes;
4305         if (block_rsv->reserved >= block_rsv->size) {
4306                 num_bytes = block_rsv->reserved - block_rsv->size;
4307                 block_rsv->reserved = block_rsv->size;
4308                 block_rsv->full = 1;
4309         } else {
4310                 num_bytes = 0;
4311         }
4312         spin_unlock(&block_rsv->lock);
4313
4314         if (num_bytes > 0) {
4315                 if (dest) {
4316                         spin_lock(&dest->lock);
4317                         if (!dest->full) {
4318                                 u64 bytes_to_add;
4319
4320                                 bytes_to_add = dest->size - dest->reserved;
4321                                 bytes_to_add = min(num_bytes, bytes_to_add);
4322                                 dest->reserved += bytes_to_add;
4323                                 if (dest->reserved >= dest->size)
4324                                         dest->full = 1;
4325                                 num_bytes -= bytes_to_add;
4326                         }
4327                         spin_unlock(&dest->lock);
4328                 }
4329                 if (num_bytes) {
4330                         spin_lock(&space_info->lock);
4331                         space_info->bytes_may_use -= num_bytes;
4332                         trace_btrfs_space_reservation(fs_info, "space_info",
4333                                         space_info->flags, num_bytes, 0);
4334                         space_info->reservation_progress++;
4335                         spin_unlock(&space_info->lock);
4336                 }
4337         }
4338 }
4339
4340 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4341                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4342 {
4343         int ret;
4344
4345         ret = block_rsv_use_bytes(src, num_bytes);
4346         if (ret)
4347                 return ret;
4348
4349         block_rsv_add_bytes(dst, num_bytes, 1);
4350         return 0;
4351 }
4352
4353 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4354 {
4355         memset(rsv, 0, sizeof(*rsv));
4356         spin_lock_init(&rsv->lock);
4357         rsv->type = type;
4358 }
4359
4360 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4361                                               unsigned short type)
4362 {
4363         struct btrfs_block_rsv *block_rsv;
4364         struct btrfs_fs_info *fs_info = root->fs_info;
4365
4366         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4367         if (!block_rsv)
4368                 return NULL;
4369
4370         btrfs_init_block_rsv(block_rsv, type);
4371         block_rsv->space_info = __find_space_info(fs_info,
4372                                                   BTRFS_BLOCK_GROUP_METADATA);
4373         return block_rsv;
4374 }
4375
4376 void btrfs_free_block_rsv(struct btrfs_root *root,
4377                           struct btrfs_block_rsv *rsv)
4378 {
4379         if (!rsv)
4380                 return;
4381         btrfs_block_rsv_release(root, rsv, (u64)-1);
4382         kfree(rsv);
4383 }
4384
4385 int btrfs_block_rsv_add(struct btrfs_root *root,
4386                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4387                         enum btrfs_reserve_flush_enum flush)
4388 {
4389         int ret;
4390
4391         if (num_bytes == 0)
4392                 return 0;
4393
4394         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4395         if (!ret) {
4396                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4397                 return 0;
4398         }
4399
4400         return ret;
4401 }
4402
4403 int btrfs_block_rsv_check(struct btrfs_root *root,
4404                           struct btrfs_block_rsv *block_rsv, int min_factor)
4405 {
4406         u64 num_bytes = 0;
4407         int ret = -ENOSPC;
4408
4409         if (!block_rsv)
4410                 return 0;
4411
4412         spin_lock(&block_rsv->lock);
4413         num_bytes = div_factor(block_rsv->size, min_factor);
4414         if (block_rsv->reserved >= num_bytes)
4415                 ret = 0;
4416         spin_unlock(&block_rsv->lock);
4417
4418         return ret;
4419 }
4420
4421 int btrfs_block_rsv_refill(struct btrfs_root *root,
4422                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4423                            enum btrfs_reserve_flush_enum flush)
4424 {
4425         u64 num_bytes = 0;
4426         int ret = -ENOSPC;
4427
4428         if (!block_rsv)
4429                 return 0;
4430
4431         spin_lock(&block_rsv->lock);
4432         num_bytes = min_reserved;
4433         if (block_rsv->reserved >= num_bytes)
4434                 ret = 0;
4435         else
4436                 num_bytes -= block_rsv->reserved;
4437         spin_unlock(&block_rsv->lock);
4438
4439         if (!ret)
4440                 return 0;
4441
4442         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4443         if (!ret) {
4444                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4445                 return 0;
4446         }
4447
4448         return ret;
4449 }
4450
4451 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4452                             struct btrfs_block_rsv *dst_rsv,
4453                             u64 num_bytes)
4454 {
4455         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4456 }
4457
4458 void btrfs_block_rsv_release(struct btrfs_root *root,
4459                              struct btrfs_block_rsv *block_rsv,
4460                              u64 num_bytes)
4461 {
4462         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4463         if (global_rsv->full || global_rsv == block_rsv ||
4464             block_rsv->space_info != global_rsv->space_info)
4465                 global_rsv = NULL;
4466         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4467                                 num_bytes);
4468 }
4469
4470 /*
4471  * helper to calculate size of global block reservation.
4472  * the desired value is sum of space used by extent tree,
4473  * checksum tree and root tree
4474  */
4475 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4476 {
4477         struct btrfs_space_info *sinfo;
4478         u64 num_bytes;
4479         u64 meta_used;
4480         u64 data_used;
4481         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4482
4483         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4484         spin_lock(&sinfo->lock);
4485         data_used = sinfo->bytes_used;
4486         spin_unlock(&sinfo->lock);
4487
4488         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4489         spin_lock(&sinfo->lock);
4490         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4491                 data_used = 0;
4492         meta_used = sinfo->bytes_used;
4493         spin_unlock(&sinfo->lock);
4494
4495         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4496                     csum_size * 2;
4497         num_bytes += div64_u64(data_used + meta_used, 50);
4498
4499         if (num_bytes * 3 > meta_used)
4500                 num_bytes = div64_u64(meta_used, 3);
4501
4502         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4503 }
4504
4505 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4506 {
4507         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4508         struct btrfs_space_info *sinfo = block_rsv->space_info;
4509         u64 num_bytes;
4510
4511         num_bytes = calc_global_metadata_size(fs_info);
4512
4513         spin_lock(&sinfo->lock);
4514         spin_lock(&block_rsv->lock);
4515
4516         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4517
4518         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4519                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4520                     sinfo->bytes_may_use;
4521
4522         if (sinfo->total_bytes > num_bytes) {
4523                 num_bytes = sinfo->total_bytes - num_bytes;
4524                 block_rsv->reserved += num_bytes;
4525                 sinfo->bytes_may_use += num_bytes;
4526                 trace_btrfs_space_reservation(fs_info, "space_info",
4527                                       sinfo->flags, num_bytes, 1);
4528         }
4529
4530         if (block_rsv->reserved >= block_rsv->size) {
4531                 num_bytes = block_rsv->reserved - block_rsv->size;
4532                 sinfo->bytes_may_use -= num_bytes;
4533                 trace_btrfs_space_reservation(fs_info, "space_info",
4534                                       sinfo->flags, num_bytes, 0);
4535                 sinfo->reservation_progress++;
4536                 block_rsv->reserved = block_rsv->size;
4537                 block_rsv->full = 1;
4538         }
4539
4540         spin_unlock(&block_rsv->lock);
4541         spin_unlock(&sinfo->lock);
4542 }
4543
4544 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4545 {
4546         struct btrfs_space_info *space_info;
4547
4548         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4549         fs_info->chunk_block_rsv.space_info = space_info;
4550
4551         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4552         fs_info->global_block_rsv.space_info = space_info;
4553         fs_info->delalloc_block_rsv.space_info = space_info;
4554         fs_info->trans_block_rsv.space_info = space_info;
4555         fs_info->empty_block_rsv.space_info = space_info;
4556         fs_info->delayed_block_rsv.space_info = space_info;
4557
4558         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4559         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4560         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4561         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4562         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4563
4564         update_global_block_rsv(fs_info);
4565 }
4566
4567 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4568 {
4569         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4570                                 (u64)-1);
4571         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4572         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4573         WARN_ON(fs_info->trans_block_rsv.size > 0);
4574         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4575         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4576         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4577         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4578         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4579 }
4580
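/*
 * Release any metadata bytes still reserved against the transaction's
 * block reservation now that the transaction is done with them.
 */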
4581 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4582                                   struct btrfs_root *root)
4583 {
4584         if (!trans->block_rsv)
4585                 return;
4586
4587         if (!trans->bytes_reserved)
4588                 return;
4589
4590         trace_btrfs_space_reservation(root->fs_info, "transaction",
4591                                       trans->transid, trans->bytes_reserved, 0);
4592         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4593         trans->bytes_reserved = 0;
4594 }
4595
4596 /* Can only return 0 or -ENOSPC */
4597 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4598                                   struct inode *inode)
4599 {
4600         struct btrfs_root *root = BTRFS_I(inode)->root;
4601         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4602         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4603
4604         /*
4605          * We need to hold space in order to delete our orphan item once we've
4606          * added it; this takes the reservation so we can release it later
4607          * when we are truly done with the orphan item.
4608          */
4609         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4610         trace_btrfs_space_reservation(root->fs_info, "orphan",
4611                                       btrfs_ino(inode), num_bytes, 1);
4612         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4613 }
4614
4615 void btrfs_orphan_release_metadata(struct inode *inode)
4616 {
4617         struct btrfs_root *root = BTRFS_I(inode)->root;
4618         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4619         trace_btrfs_space_reservation(root->fs_info, "orphan",
4620                                       btrfs_ino(inode), num_bytes, 0);
4621         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4622 }
4623
4624 /*
4625  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4626  * root: the root of the parent directory
4627  * rsv: block reservation
4628  * items: the number of items that we need to reserve
4629  * qgroup_reserved: used to return the reserved size in qgroup
4630  *
4631  * This function is used to reserve the space for snapshot/subvolume
4632  * creation and deletion. Those operations are different from the
4633  * common file/directory operations: they change two fs/file trees
4634  * and the root tree, and the number of items that the qgroup reserves
4635  * differs from the free space reservation. So we cannot use
4636  * the space reservation mechanism in start_transaction().
4637  */
4638 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4639                                      struct btrfs_block_rsv *rsv,
4640                                      int items,
4641                                      u64 *qgroup_reserved)
4642 {
4643         u64 num_bytes;
4644         int ret;
4645
4646         if (root->fs_info->quota_enabled) {
4647                 /* One for parent inode, two for dir entries */
4648                 num_bytes = 3 * root->leafsize;
4649                 ret = btrfs_qgroup_reserve(root, num_bytes);
4650                 if (ret)
4651                         return ret;
4652         } else {
4653                 num_bytes = 0;
4654         }
4655
4656         *qgroup_reserved = num_bytes;
4657
4658         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4659         rsv->space_info = __find_space_info(root->fs_info,
4660                                             BTRFS_BLOCK_GROUP_METADATA);
4661         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4662                                   BTRFS_RESERVE_FLUSH_ALL);
4663         if (ret) {
4664                 if (*qgroup_reserved)
4665                         btrfs_qgroup_free(root, *qgroup_reserved);
4666         }
4667
4668         return ret;
4669 }
4670
4671 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4672                                       struct btrfs_block_rsv *rsv,
4673                                       u64 qgroup_reserved)
4674 {
4675         btrfs_block_rsv_release(root, rsv, (u64)-1);
4676         if (qgroup_reserved)
4677                 btrfs_qgroup_free(root, qgroup_reserved);
4678 }
4679
4680 /**
4681  * drop_outstanding_extent - drop an outstanding extent
4682  * @inode: the inode we're dropping the extent for
4683  *
4684  * This is called when we are freeing up an outstanding extent, either called
4685  * after an error or after an extent is written.  This will return the number of
4686  * reserved extents that need to be freed.  This must be called with
4687  * BTRFS_I(inode)->lock held.
4688  */
4689 static unsigned drop_outstanding_extent(struct inode *inode)
4690 {
4691         unsigned drop_inode_space = 0;
4692         unsigned dropped_extents = 0;
4693
4694         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4695         BTRFS_I(inode)->outstanding_extents--;
4696
4697         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4698             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4699                                &BTRFS_I(inode)->runtime_flags))
4700                 drop_inode_space = 1;
4701
4702         /*
4703          * If we have at least as many outstanding extents as we have
4704          * reserved, then we need to leave the reserved extents count alone.
4705          */
4706         if (BTRFS_I(inode)->outstanding_extents >=
4707             BTRFS_I(inode)->reserved_extents)
4708                 return drop_inode_space;
4709
4710         dropped_extents = BTRFS_I(inode)->reserved_extents -
4711                 BTRFS_I(inode)->outstanding_extents;
4712         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4713         return dropped_extents + drop_inode_space;
4714 }
4715
4716 /**
4717  * calc_csum_metadata_size - return the amount of metadata space that must be
4718  *      reserved/freed for the given bytes.
4719  * @inode: the inode we're manipulating
4720  * @num_bytes: the number of bytes in question
4721  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4722  *
4723  * This adjusts the number of csum_bytes in the inode and then returns the
4724  * correct amount of metadata that must either be reserved or freed.  We
4725  * calculate how many checksums we can fit into one leaf and then divide the
4726  * number of bytes that will need to be checksummed by this value to figure out
4727  * how many checksums will be required.  If we are adding bytes then the number
4728  * may go up and we will return the number of additional bytes that must be
4729  * reserved.  If it is going down we will return the number of bytes that must
4730  * be freed.
4731  *
4732  * This must be called with BTRFS_I(inode)->lock held.
4733  */
4734 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4735                                    int reserve)
4736 {
4737         struct btrfs_root *root = BTRFS_I(inode)->root;
4738         u64 csum_size;
4739         int num_csums_per_leaf;
4740         int num_csums;
4741         int old_csums;
4742
4743         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4744             BTRFS_I(inode)->csum_bytes == 0)
4745                 return 0;
4746
4747         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4748         if (reserve)
4749                 BTRFS_I(inode)->csum_bytes += num_bytes;
4750         else
4751                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4752         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4753         num_csums_per_leaf = (int)div64_u64(csum_size,
4754                                             sizeof(struct btrfs_csum_item) +
4755                                             sizeof(struct btrfs_disk_key));
4756         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4757         num_csums = num_csums + num_csums_per_leaf - 1;
4758         num_csums = num_csums / num_csums_per_leaf;
4759
4760         old_csums = old_csums + num_csums_per_leaf - 1;
4761         old_csums = old_csums / num_csums_per_leaf;
4762
4763         /* No change, no need to reserve more */
4764         if (old_csums == num_csums)
4765                 return 0;
4766
4767         if (reserve)
4768                 return btrfs_calc_trans_metadata_size(root,
4769                                                       num_csums - old_csums);
4770
4771         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4772 }
4773
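/*
 * Reserve metadata space for @num_bytes of delalloc: account a new
 * outstanding extent (plus an inode update item if one is not already
 * reserved), reserve the matching csum space, and charge it all to the
 * delalloc block reservation.  On failure every accounting step is
 * carefully unwound in out_fail below.
 */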
4774 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4775 {
4776         struct btrfs_root *root = BTRFS_I(inode)->root;
4777         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4778         u64 to_reserve = 0;
4779         u64 csum_bytes;
4780         unsigned nr_extents = 0;
4781         int extra_reserve = 0;
4782         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4783         int ret = 0;
4784         bool delalloc_lock = true;
4785         u64 to_free = 0;
4786         unsigned dropped;
4787
4788         /* If we are a free space inode we need to not flush since we will be in
4789          * the middle of a transaction commit.  We also don't need the delalloc
4790          * mutex since we won't race with anybody.  We need this mostly to make
4791          * lockdep shut its filthy mouth.
4792          */
4793         if (btrfs_is_free_space_inode(inode)) {
4794                 flush = BTRFS_RESERVE_NO_FLUSH;
4795                 delalloc_lock = false;
4796         }
4797
4798         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4799             btrfs_transaction_in_commit(root->fs_info))
4800                 schedule_timeout(1);
4801
4802         if (delalloc_lock)
4803                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4804
4805         num_bytes = ALIGN(num_bytes, root->sectorsize);
4806
4807         spin_lock(&BTRFS_I(inode)->lock);
4808         BTRFS_I(inode)->outstanding_extents++;
4809
4810         if (BTRFS_I(inode)->outstanding_extents >
4811             BTRFS_I(inode)->reserved_extents)
4812                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4813                         BTRFS_I(inode)->reserved_extents;
4814
4815         /*
4816          * Add an item to reserve for updating the inode when we complete the
4817          * delalloc io.
4818          */
4819         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4820                       &BTRFS_I(inode)->runtime_flags)) {
4821                 nr_extents++;
4822                 extra_reserve = 1;
4823         }
4824
4825         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4826         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4827         csum_bytes = BTRFS_I(inode)->csum_bytes;
4828         spin_unlock(&BTRFS_I(inode)->lock);
4829
4830         if (root->fs_info->quota_enabled) {
4831                 ret = btrfs_qgroup_reserve(root, num_bytes +
4832                                            nr_extents * root->leafsize);
4833                 if (ret)
4834                         goto out_fail;
4835         }
4836
4837         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4838         if (unlikely(ret)) {
4839                 if (root->fs_info->quota_enabled)
4840                         btrfs_qgroup_free(root, num_bytes +
4841                                                 nr_extents * root->leafsize);
4842                 goto out_fail;
4843         }
4844
4845         spin_lock(&BTRFS_I(inode)->lock);
4846         if (extra_reserve) {
4847                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4848                         &BTRFS_I(inode)->runtime_flags);
4849                 nr_extents--;
4850         }
4851         BTRFS_I(inode)->reserved_extents += nr_extents;
4852         spin_unlock(&BTRFS_I(inode)->lock);
4853
4854         if (delalloc_lock)
4855                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4856
4857         if (to_reserve)
4858                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4859                                               btrfs_ino(inode), to_reserve, 1);
4860         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4861
4862         return 0;
4863
4864 out_fail:
4865         spin_lock(&BTRFS_I(inode)->lock);
4866         dropped = drop_outstanding_extent(inode);
4867         /*
4868          * If the inode's csum_bytes is the same as the original
4869          * csum_bytes then we know we haven't raced with any free()ers
4870          * so we can just reduce our inode's csum bytes and carry on.
4871          */
4872         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
4873                 calc_csum_metadata_size(inode, num_bytes, 0);
4874         } else {
4875                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
4876                 u64 bytes;
4877
4878                 /*
4879                  * This is tricky, but first we need to figure out how much we
4880                  * freed from any free-ers that occurred during this
4881                  * reservation, so we reset ->csum_bytes to the csum_bytes
4882                  * before we dropped our lock, and then call the free for the
4883                  * number of bytes that were freed while we were trying our
4884                  * reservation.
4885                  */
4886                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
4887                 BTRFS_I(inode)->csum_bytes = csum_bytes;
4888                 to_free = calc_csum_metadata_size(inode, bytes, 0);
4889
4890
4891                 /*
4892                  * Now we need to see how much we would have freed had we not
4893                  * been making this reservation and our ->csum_bytes were not
4894                  * artificially inflated.
4895                  */
4896                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
4897                 bytes = csum_bytes - orig_csum_bytes;
4898                 bytes = calc_csum_metadata_size(inode, bytes, 0);
4899
4900                 /*
4901                  * Now reset ->csum_bytes to what it should be.  If bytes is
4902                  * more than to_free then we would have freed more space had we
4903                  * not had an artificially high ->csum_bytes, so we need to free
4904                  * the remainder.  If bytes is the same or less then we don't
4905                  * need to do anything, the other free-ers did the correct
4906                  * thing.
4907                  */
4908                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
4909                 if (bytes > to_free)
4910                         to_free = bytes - to_free;
4911                 else
4912                         to_free = 0;
4913         }
4914         spin_unlock(&BTRFS_I(inode)->lock);
4915         if (dropped)
4916                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4917
4918         if (to_free) {
4919                 btrfs_block_rsv_release(root, block_rsv, to_free);
4920                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4921                                               btrfs_ino(inode), to_free, 0);
4922         }
4923         if (delalloc_lock)
4924                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4925         return ret;
4926 }
4927
4928 /**
4929  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4930  * @inode: the inode to release the reservation for
4931  * @num_bytes: the number of bytes we're releasing
4932  *
4933  * This will release the metadata reservation for an inode.  This can be called
4934  * once we complete IO for a given set of bytes to release their metadata
4935  * reservations.
4936  */
4937 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4938 {
4939         struct btrfs_root *root = BTRFS_I(inode)->root;
4940         u64 to_free = 0;
4941         unsigned dropped;
4942
4943         num_bytes = ALIGN(num_bytes, root->sectorsize);
4944         spin_lock(&BTRFS_I(inode)->lock);
4945         dropped = drop_outstanding_extent(inode);
4946
4947         if (num_bytes)
4948                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4949         spin_unlock(&BTRFS_I(inode)->lock);
4950         if (dropped > 0)
4951                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4952
4953         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4954                                       btrfs_ino(inode), to_free, 0);
4955         if (root->fs_info->quota_enabled) {
4956                 btrfs_qgroup_free(root, num_bytes +
4957                                         dropped * root->leafsize);
4958         }
4959
4960         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4961                                 to_free);
4962 }
4963
4964 /**
4965  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4966  * @inode: inode we're writing to
4967  * @num_bytes: the number of bytes we want to allocate
4968  *
4969  * This will do the following things
4970  *
4971  * o reserve space in the data space info for num_bytes
4972  * o reserve space in the metadata space info based on number of outstanding
4973  *   extents and how many csums will be needed
4974  * o add to the inode's ->delalloc_bytes
4975  * o add it to the fs_info's delalloc inodes list.
4976  *
4977  * This will return 0 for success and -ENOSPC if there is no space left.
4978  */
4979 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4980 {
4981         int ret;
4982
4983         ret = btrfs_check_data_free_space(inode, num_bytes);
4984         if (ret)
4985                 return ret;
4986
4987         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4988         if (ret) {
4989                 btrfs_free_reserved_data_space(inode, num_bytes);
4990                 return ret;
4991         }
4992
4993         return 0;
4994 }
4995
4996 /**
4997  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4998  * @inode: inode we're releasing space for
4999  * @num_bytes: the number of bytes we want to free up
5000  *
5001  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5002  * called in the case that we don't need the metadata AND data reservations
5003  * anymore, for example after an error or when we insert an inline extent.
5004  *
5005  * This function will release the metadata space that was not used and will
5006  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5007  * list if there are no delalloc bytes left.
5008  */
5009 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5010 {
5011         btrfs_delalloc_release_metadata(inode, num_bytes);
5012         btrfs_free_reserved_data_space(inode, num_bytes);
5013 }
5014
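/*
 * Adjust the used-bytes accounting of the block group(s) covering
 * [bytenr, bytenr + num_bytes) after an allocation (alloc == 1) or a
 * free (alloc == 0), updating the super block and space_info counters
 * to match.  Freed space is pinned rather than made free immediately.
 */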
5015 static int update_block_group(struct btrfs_root *root,
5016                               u64 bytenr, u64 num_bytes, int alloc)
5017 {
5018         struct btrfs_block_group_cache *cache = NULL;
5019         struct btrfs_fs_info *info = root->fs_info;
5020         u64 total = num_bytes;
5021         u64 old_val;
5022         u64 byte_in_group;
5023         int factor;
5024
5025         /* block accounting for super block */
5026         spin_lock(&info->delalloc_lock);
5027         old_val = btrfs_super_bytes_used(info->super_copy);
5028         if (alloc)
5029                 old_val += num_bytes;
5030         else
5031                 old_val -= num_bytes;
5032         btrfs_set_super_bytes_used(info->super_copy, old_val);
5033         spin_unlock(&info->delalloc_lock);
5034
5035         while (total) {
5036                 cache = btrfs_lookup_block_group(info, bytenr);
5037                 if (!cache)
5038                         return -ENOENT;
5039                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5040                                     BTRFS_BLOCK_GROUP_RAID1 |
5041                                     BTRFS_BLOCK_GROUP_RAID10))
5042                         factor = 2;
5043                 else
5044                         factor = 1;
5045                 /*
5046                  * If this block group has free space cache written out, we
5047                  * need to make sure to load it if we are removing space.  This
5048                  * is because we need the unpinning stage to actually add the
5049                  * space back to the block group, otherwise we will leak space.
5050                  */
5051                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5052                         cache_block_group(cache, 1);
5053
5054                 byte_in_group = bytenr - cache->key.objectid;
5055                 WARN_ON(byte_in_group > cache->key.offset);
5056
5057                 spin_lock(&cache->space_info->lock);
5058                 spin_lock(&cache->lock);
5059
5060                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5061                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5062                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5063
5064                 cache->dirty = 1;
5065                 old_val = btrfs_block_group_used(&cache->item);
5066                 num_bytes = min(total, cache->key.offset - byte_in_group);
5067                 if (alloc) {
5068                         old_val += num_bytes;
5069                         btrfs_set_block_group_used(&cache->item, old_val);
5070                         cache->reserved -= num_bytes;
5071                         cache->space_info->bytes_reserved -= num_bytes;
5072                         cache->space_info->bytes_used += num_bytes;
5073                         cache->space_info->disk_used += num_bytes * factor;
5074                         spin_unlock(&cache->lock);
5075                         spin_unlock(&cache->space_info->lock);
5076                 } else {
5077                         old_val -= num_bytes;
5078                         btrfs_set_block_group_used(&cache->item, old_val);
5079                         cache->pinned += num_bytes;
5080                         cache->space_info->bytes_pinned += num_bytes;
5081                         cache->space_info->bytes_used -= num_bytes;
5082                         cache->space_info->disk_used -= num_bytes * factor;
5083                         spin_unlock(&cache->lock);
5084                         spin_unlock(&cache->space_info->lock);
5085
5086                         set_extent_dirty(info->pinned_extents,
5087                                          bytenr, bytenr + num_bytes - 1,
5088                                          GFP_NOFS | __GFP_NOFAIL);
5089                 }
5090                 btrfs_put_block_group(cache);
5091                 total -= num_bytes;
5092                 bytenr += num_bytes;
5093         }
5094         return 0;
5095 }
5096
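/*
 * Return the logical offset of the first block group, using the cached
 * fs_info->first_logical_byte when it is valid and otherwise falling
 * back to a block group lookup from search_start.
 */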
5097 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5098 {
5099         struct btrfs_block_group_cache *cache;
5100         u64 bytenr;
5101
5102         spin_lock(&root->fs_info->block_group_cache_lock);
5103         bytenr = root->fs_info->first_logical_byte;
5104         spin_unlock(&root->fs_info->block_group_cache_lock);
5105
5106         if (bytenr < (u64)-1)
5107                 return bytenr;
5108
5109         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5110         if (!cache)
5111                 return 0;
5112
5113         bytenr = cache->key.objectid;
5114         btrfs_put_block_group(cache);
5115
5116         return bytenr;
5117 }
5118
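/*
 * Move an extent's bytes into the pinned counters (dropping them from
 * the reserved counters if @reserved) and mark the range dirty in the
 * pinned_extents tree so it gets unpinned at transaction commit.
 */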
5119 static int pin_down_extent(struct btrfs_root *root,
5120                            struct btrfs_block_group_cache *cache,
5121                            u64 bytenr, u64 num_bytes, int reserved)
5122 {
5123         spin_lock(&cache->space_info->lock);
5124         spin_lock(&cache->lock);
5125         cache->pinned += num_bytes;
5126         cache->space_info->bytes_pinned += num_bytes;
5127         if (reserved) {
5128                 cache->reserved -= num_bytes;
5129                 cache->space_info->bytes_reserved -= num_bytes;
5130         }
5131         spin_unlock(&cache->lock);
5132         spin_unlock(&cache->space_info->lock);
5133
5134         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5135                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5136         return 0;
5137 }
5138
5139 /*
5140  * this function must be called within transaction
5141  */
5142 int btrfs_pin_extent(struct btrfs_root *root,
5143                      u64 bytenr, u64 num_bytes, int reserved)
5144 {
5145         struct btrfs_block_group_cache *cache;
5146
5147         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5148         BUG_ON(!cache); /* Logic error */
5149
5150         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5151
5152         btrfs_put_block_group(cache);
5153         return 0;
5154 }
5155
5156 /*
5157  * this function must be called within transaction
5158  */
5159 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5160                                     u64 bytenr, u64 num_bytes)
5161 {
5162         struct btrfs_block_group_cache *cache;
5163         int ret;
5164
5165         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5166         if (!cache)
5167                 return -EINVAL;
5168
5169         /*
5170          * pull in the free space cache (if any) so that our pin
5171          * removes the free space from the cache.  We have load_only set
5172          * to one because the slow code to read in the free extents does check
5173          * the pinned extents.
5174          */
5175         cache_block_group(cache, 1);
5176
5177         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5178
5179         /* remove us from the free space cache (if we're there at all) */
5180         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5181         btrfs_put_block_group(cache);
5182         return ret;
5183 }
5184
5185 /**
5186  * btrfs_update_reserved_bytes - update the block_group and space info counters
5187  * @cache:      The cache we are manipulating
5188  * @num_bytes:  The number of bytes in question
5189  * @reserve:    One of the reservation enums
5190  *
5191  * This is called by the allocator when it reserves space, or by somebody who is
5192  * freeing space that was never actually used on disk.  For example if you
5193  * reserve some space for a new leaf in transaction A and before transaction A
5194  * commits you free that leaf, you call this with reserve set to 0 in order to
5195  * clear the reservation.
5196  *
5197  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5198  * ENOSPC accounting.  For data we handle the reservation through clearing the
5199  * delalloc bits in the io_tree.  We have to do this since we could end up
5200  * allocating less disk space for the amount of data we have reserved in the
5201  * case of compression.
5202  *
5203  * If this is a reservation and the block group has become read only we cannot
5204  * make the reservation and return -EAGAIN, otherwise this function always
5205  * succeeds.
5206  */
5207 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5208                                        u64 num_bytes, int reserve)
5209 {
5210         struct btrfs_space_info *space_info = cache->space_info;
5211         int ret = 0;
5212
5213         spin_lock(&space_info->lock);
5214         spin_lock(&cache->lock);
5215         if (reserve != RESERVE_FREE) {
5216                 if (cache->ro) {
5217                         ret = -EAGAIN;
5218                 } else {
5219                         cache->reserved += num_bytes;
5220                         space_info->bytes_reserved += num_bytes;
5221                         if (reserve == RESERVE_ALLOC) {
5222                                 trace_btrfs_space_reservation(cache->fs_info,
5223                                                 "space_info", space_info->flags,
5224                                                 num_bytes, 0);
5225                                 space_info->bytes_may_use -= num_bytes;
5226                         }
5227                 }
5228         } else {
5229                 if (cache->ro)
5230                         space_info->bytes_readonly += num_bytes;
5231                 cache->reserved -= num_bytes;
5232                 space_info->bytes_reserved -= num_bytes;
5233                 space_info->reservation_progress++;
5234         }
5235         spin_unlock(&cache->lock);
5236         spin_unlock(&space_info->lock);
5237         return ret;
5238 }
5239
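/*
 * Called around transaction commit: record how far each in-progress
 * caching control has gotten so unpinning stays in sync with it, and
 * swap which freed_extents tree will collect pins for the next
 * transaction.
 */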
5240 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5241                                 struct btrfs_root *root)
5242 {
5243         struct btrfs_fs_info *fs_info = root->fs_info;
5244         struct btrfs_caching_control *next;
5245         struct btrfs_caching_control *caching_ctl;
5246         struct btrfs_block_group_cache *cache;
5247
5248         down_write(&fs_info->extent_commit_sem);
5249
5250         list_for_each_entry_safe(caching_ctl, next,
5251                                  &fs_info->caching_block_groups, list) {
5252                 cache = caching_ctl->block_group;
5253                 if (block_group_cache_done(cache)) {
5254                         cache->last_byte_to_unpin = (u64)-1;
5255                         list_del_init(&caching_ctl->list);
5256                         put_caching_control(caching_ctl);
5257                 } else {
5258                         cache->last_byte_to_unpin = caching_ctl->progress;
5259                 }
5260         }
5261
5262         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5263                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5264         else
5265                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5266
5267         up_write(&fs_info->extent_commit_sem);
5268
5269         update_global_block_rsv(fs_info);
5270 }
5271
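/*
 * Return the pinned range [start, end] to the free space accounting,
 * topping up the global block reservation first when it is not full
 * and the space came from the same space_info.
 */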
5272 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5273 {
5274         struct btrfs_fs_info *fs_info = root->fs_info;
5275         struct btrfs_block_group_cache *cache = NULL;
5276         struct btrfs_space_info *space_info;
5277         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5278         u64 len;
5279         bool readonly;
5280
5281         while (start <= end) {
5282                 readonly = false;
5283                 if (!cache ||
5284                     start >= cache->key.objectid + cache->key.offset) {
5285                         if (cache)
5286                                 btrfs_put_block_group(cache);
5287                         cache = btrfs_lookup_block_group(fs_info, start);
5288                         BUG_ON(!cache); /* Logic error */
5289                 }
5290
5291                 len = cache->key.objectid + cache->key.offset - start;
5292                 len = min(len, end + 1 - start);
5293
5294                 if (start < cache->last_byte_to_unpin) {
5295                         len = min(len, cache->last_byte_to_unpin - start);
5296                         btrfs_add_free_space(cache, start, len);
5297                 }
5298
5299                 start += len;
5300                 space_info = cache->space_info;
5301
5302                 spin_lock(&space_info->lock);
5303                 spin_lock(&cache->lock);
5304                 cache->pinned -= len;
5305                 space_info->bytes_pinned -= len;
5306                 if (cache->ro) {
5307                         space_info->bytes_readonly += len;
5308                         readonly = true;
5309                 }
5310                 spin_unlock(&cache->lock);
5311                 if (!readonly && global_rsv->space_info == space_info) {
5312                         spin_lock(&global_rsv->lock);
5313                         if (!global_rsv->full) {
5314                                 len = min(len, global_rsv->size -
5315                                           global_rsv->reserved);
5316                                 global_rsv->reserved += len;
5317                                 space_info->bytes_may_use += len;
5318                                 if (global_rsv->reserved >= global_rsv->size)
5319                                         global_rsv->full = 1;
5320                         }
5321                         spin_unlock(&global_rsv->lock);
5322                 }
5323                 spin_unlock(&space_info->lock);
5324         }
5325
5326         if (cache)
5327                 btrfs_put_block_group(cache);
5328         return 0;
5329 }
5330
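/*
 * After a commit, walk the previous transaction's pinned extents,
 * optionally discard them, and release them back to the allocator.
 */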
5331 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5332                                struct btrfs_root *root)
5333 {
5334         struct btrfs_fs_info *fs_info = root->fs_info;
5335         struct extent_io_tree *unpin;
5336         u64 start;
5337         u64 end;
5338         int ret;
5339
5340         if (trans->aborted)
5341                 return 0;
5342
5343         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5344                 unpin = &fs_info->freed_extents[1];
5345         else
5346                 unpin = &fs_info->freed_extents[0];
5347
5348         while (1) {
5349                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5350                                             EXTENT_DIRTY, NULL);
5351                 if (ret)
5352                         break;
5353
5354                 if (btrfs_test_opt(root, DISCARD))
5355                         ret = btrfs_discard_extent(root, start,
5356                                                    end + 1 - start, NULL);
5357
5358                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5359                 unpin_extent_range(root, start, end);
5360                 cond_resched();
5361         }
5362
5363         return 0;
5364 }
5365
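/*
 * Drop refs_to_drop references on the extent at @bytenr.  This finds
 * the backref and the extent item (handling both skinny metadata and
 * classic extent keys), decrements or deletes them, and frees the
 * extent itself once the last reference is gone.
 */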
5366 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5367                                 struct btrfs_root *root,
5368                                 u64 bytenr, u64 num_bytes, u64 parent,
5369                                 u64 root_objectid, u64 owner_objectid,
5370                                 u64 owner_offset, int refs_to_drop,
5371                                 struct btrfs_delayed_extent_op *extent_op)
5372 {
5373         struct btrfs_key key;
5374         struct btrfs_path *path;
5375         struct btrfs_fs_info *info = root->fs_info;
5376         struct btrfs_root *extent_root = info->extent_root;
5377         struct extent_buffer *leaf;
5378         struct btrfs_extent_item *ei;
5379         struct btrfs_extent_inline_ref *iref;
5380         int ret;
5381         int is_data;
5382         int extent_slot = 0;
5383         int found_extent = 0;
5384         int num_to_del = 1;
5385         u32 item_size;
5386         u64 refs;
5387         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5388                                                  SKINNY_METADATA);
5389
5390         path = btrfs_alloc_path();
5391         if (!path)
5392                 return -ENOMEM;
5393
5394         path->reada = 1;
5395         path->leave_spinning = 1;
5396
5397         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5398         BUG_ON(!is_data && refs_to_drop != 1);
5399
5400         if (is_data)
5401                 skinny_metadata = 0;
5402
5403         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5404                                     bytenr, num_bytes, parent,
5405                                     root_objectid, owner_objectid,
5406                                     owner_offset);
5407         if (ret == 0) {
5408                 extent_slot = path->slots[0];
5409                 while (extent_slot >= 0) {
5410                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5411                                               extent_slot);
5412                         if (key.objectid != bytenr)
5413                                 break;
5414                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5415                             key.offset == num_bytes) {
5416                                 found_extent = 1;
5417                                 break;
5418                         }
5419                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5420                             key.offset == owner_objectid) {
5421                                 found_extent = 1;
5422                                 break;
5423                         }
5424                         if (path->slots[0] - extent_slot > 5)
5425                                 break;
5426                         extent_slot--;
5427                 }
5428 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5429                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5430                 if (found_extent && item_size < sizeof(*ei))
5431                         found_extent = 0;
5432 #endif
5433                 if (!found_extent) {
5434                         BUG_ON(iref);
5435                         ret = remove_extent_backref(trans, extent_root, path,
5436                                                     NULL, refs_to_drop,
5437                                                     is_data);
5438                         if (ret) {
5439                                 btrfs_abort_transaction(trans, extent_root, ret);
5440                                 goto out;
5441                         }
5442                         btrfs_release_path(path);
5443                         path->leave_spinning = 1;
5444
5445                         key.objectid = bytenr;
5446                         key.type = BTRFS_EXTENT_ITEM_KEY;
5447                         key.offset = num_bytes;
5448
5449                         if (!is_data && skinny_metadata) {
5450                                 key.type = BTRFS_METADATA_ITEM_KEY;
5451                                 key.offset = owner_objectid;
5452                         }
5453
5454                         ret = btrfs_search_slot(trans, extent_root,
5455                                                 &key, path, -1, 1);
5456                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5457                                 /*
5458                                  * Couldn't find our skinny metadata item,
5459                                  * see if we have ye olde extent item.
5460                                  */
5461                                 path->slots[0]--;
5462                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5463                                                       path->slots[0]);
5464                                 if (key.objectid == bytenr &&
5465                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5466                                     key.offset == num_bytes)
5467                                         ret = 0;
5468                         }
5469
5470                         if (ret > 0 && skinny_metadata) {
5471                                 skinny_metadata = false;
5472                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5473                                 key.offset = num_bytes;
5474                                 btrfs_release_path(path);
5475                                 ret = btrfs_search_slot(trans, extent_root,
5476                                                         &key, path, -1, 1);
5477                         }
5478
5479                         if (ret) {
5480                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5481                                         ret, (unsigned long long)bytenr);
5482                                 if (ret > 0)
5483                                         btrfs_print_leaf(extent_root,
5484                                                          path->nodes[0]);
5485                         }
5486                         if (ret < 0) {
5487                                 btrfs_abort_transaction(trans, extent_root, ret);
5488                                 goto out;
5489                         }
5490                         extent_slot = path->slots[0];
5491                 }
5492         } else if (ret == -ENOENT) {
5493                 btrfs_print_leaf(extent_root, path->nodes[0]);
5494                 WARN_ON(1);
5495                 btrfs_err(info,
5496                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
5497                         (unsigned long long)bytenr,
5498                         (unsigned long long)parent,
5499                         (unsigned long long)root_objectid,
5500                         (unsigned long long)owner_objectid,
5501                         (unsigned long long)owner_offset);
5502         } else {
5503                 btrfs_abort_transaction(trans, extent_root, ret);
5504                 goto out;
5505         }
5506
5507         leaf = path->nodes[0];
5508         item_size = btrfs_item_size_nr(leaf, extent_slot);
5509 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5510         if (item_size < sizeof(*ei)) {
5511                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5512                 ret = convert_extent_item_v0(trans, extent_root, path,
5513                                              owner_objectid, 0);
5514                 if (ret < 0) {
5515                         btrfs_abort_transaction(trans, extent_root, ret);
5516                         goto out;
5517                 }
5518
5519                 btrfs_release_path(path);
5520                 path->leave_spinning = 1;
5521
5522                 key.objectid = bytenr;
5523                 key.type = BTRFS_EXTENT_ITEM_KEY;
5524                 key.offset = num_bytes;
5525
5526                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5527                                         -1, 1);
5528                 if (ret) {
5529                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5530                                 ret, (unsigned long long)bytenr);
5531                         btrfs_print_leaf(extent_root, path->nodes[0]);
5532                 }
5533                 if (ret < 0) {
5534                         btrfs_abort_transaction(trans, extent_root, ret);
5535                         goto out;
5536                 }
5537
5538                 extent_slot = path->slots[0];
5539                 leaf = path->nodes[0];
5540                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5541         }
5542 #endif
5543         BUG_ON(item_size < sizeof(*ei));
5544         ei = btrfs_item_ptr(leaf, extent_slot,
5545                             struct btrfs_extent_item);
5546         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5547             key.type == BTRFS_EXTENT_ITEM_KEY) {
5548                 struct btrfs_tree_block_info *bi;
5549                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5550                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5551                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5552         }
5553
5554         refs = btrfs_extent_refs(leaf, ei);
5555         if (refs < refs_to_drop) {
5556                 btrfs_err(info, "trying to drop %d refs but we only have %llu "
5557                           "for bytenr %llu", refs_to_drop, refs, bytenr);
5558                 ret = -EINVAL;
5559                 btrfs_abort_transaction(trans, extent_root, ret);
5560                 goto out;
5561         }
5562         refs -= refs_to_drop;
5563
5564         if (refs > 0) {
5565                 if (extent_op)
5566                         __run_delayed_extent_op(extent_op, leaf, ei);
5567                 /*
5568                  * In the case of inline back ref, reference count will
5569                  * be updated by remove_extent_backref
5570                  */
5571                 if (iref) {
5572                         BUG_ON(!found_extent);
5573                 } else {
5574                         btrfs_set_extent_refs(leaf, ei, refs);
5575                         btrfs_mark_buffer_dirty(leaf);
5576                 }
5577                 if (found_extent) {
5578                         ret = remove_extent_backref(trans, extent_root, path,
5579                                                     iref, refs_to_drop,
5580                                                     is_data);
5581                         if (ret) {
5582                                 btrfs_abort_transaction(trans, extent_root, ret);
5583                                 goto out;
5584                         }
5585                 }
5586         } else {
5587                 if (found_extent) {
5588                         BUG_ON(is_data && refs_to_drop !=
5589                                extent_data_ref_count(root, path, iref));
5590                         if (iref) {
5591                                 BUG_ON(path->slots[0] != extent_slot);
5592                         } else {
5593                                 BUG_ON(path->slots[0] != extent_slot + 1);
5594                                 path->slots[0] = extent_slot;
5595                                 num_to_del = 2;
5596                         }
5597                 }
5598
5599                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5600                                       num_to_del);
5601                 if (ret) {
5602                         btrfs_abort_transaction(trans, extent_root, ret);
5603                         goto out;
5604                 }
5605                 btrfs_release_path(path);
5606
5607                 if (is_data) {
5608                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5609                         if (ret) {
5610                                 btrfs_abort_transaction(trans, extent_root, ret);
5611                                 goto out;
5612                         }
5613                 }
5614
5615                 ret = update_block_group(root, bytenr, num_bytes, 0);
5616                 if (ret) {
5617                         btrfs_abort_transaction(trans, extent_root, ret);
5618                         goto out;
5619                 }
5620         }
5621 out:
5622         btrfs_free_path(path);
5623         return ret;
5624 }
5625
5626 /*
5627  * when we free a block, it is possible (and likely) that we free the last
5628  * delayed ref for that extent as well.  This searches the delayed ref tree for
5629  * a given extent, and if there are no other delayed refs to be processed, it
5630  * removes the ref head from the tree.
5631  */
5632 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5633                                       struct btrfs_root *root, u64 bytenr)
5634 {
5635         struct btrfs_delayed_ref_head *head;
5636         struct btrfs_delayed_ref_root *delayed_refs;
5637         struct btrfs_delayed_ref_node *ref;
5638         struct rb_node *node;
5639         int ret = 0;
5640
5641         delayed_refs = &trans->transaction->delayed_refs;
5642         spin_lock(&delayed_refs->lock);
5643         head = btrfs_find_delayed_ref_head(trans, bytenr);
5644         if (!head)
5645                 goto out;
5646
5647         node = rb_prev(&head->node.rb_node);
5648         if (!node)
5649                 goto out;
5650
5651         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5652
5653         /* there are still entries for this ref, we can't drop it */
5654         if (ref->bytenr == bytenr)
5655                 goto out;
5656
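        /*
         * a pending extent op still has to be run against the extent item,
         * so the head can't be dropped yet.  The exception is a reference
         * that was never inserted (must_insert_reserved), in which case
         * the op is moot and can be thrown away along with the head.
         */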
5657         if (head->extent_op) {
5658                 if (!head->must_insert_reserved)
5659                         goto out;
5660                 btrfs_free_delayed_extent_op(head->extent_op);
5661                 head->extent_op = NULL;
5662         }
5663
5664         /*
5665          * waiting for the lock here would deadlock.  If someone else has it
5666          * locked they are already in the process of dropping it anyway
5667          */
5668         if (!mutex_trylock(&head->mutex))
5669                 goto out;
5670
5671         /*
5672          * at this point we have a head with no other entries.  Go
5673          * ahead and process it.
5674          */
5675         head->node.in_tree = 0;
5676         rb_erase(&head->node.rb_node, &delayed_refs->root);
5677
5678         delayed_refs->num_entries--;
5679
5680         /*
5681          * we don't take a ref on the node because we're removing it from the
5682          * tree, so we just steal the ref the tree was holding.
5683          */
5684         delayed_refs->num_heads--;
5685         if (list_empty(&head->cluster))
5686                 delayed_refs->num_heads_ready--;
5687
5688         list_del_init(&head->cluster);
5689         spin_unlock(&delayed_refs->lock);
5690
5691         BUG_ON(head->extent_op);
5692         if (head->must_insert_reserved)
5693                 ret = 1;
5694
5695         mutex_unlock(&head->mutex);
5696         btrfs_put_delayed_ref(&head->node);
5697         return ret;
5698 out:
5699         spin_unlock(&delayed_refs->lock);
5700         return 0;
5701 }
5702
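/*
 * Free a tree block that we have COWed or deleted.  For non-log trees a
 * DROP delayed ref is queued first; if this was the last reference and
 * the block was allocated in the running transaction, check_ref_cleanup()
 * may discard the pending ref entirely, and a block that was never
 * written to disk can go straight back to the free space cache instead
 * of being pinned until commit.
 */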
5703 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5704                            struct btrfs_root *root,
5705                            struct extent_buffer *buf,
5706                            u64 parent, int last_ref)
5707 {
5708         struct btrfs_block_group_cache *cache = NULL;
5709         int ret;
5710
5711         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5712                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5713                                         buf->start, buf->len,
5714                                         parent, root->root_key.objectid,
5715                                         btrfs_header_level(buf),
5716                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5717                 BUG_ON(ret); /* -ENOMEM */
5718         }
5719
5720         if (!last_ref)
5721                 return;
5722
5723         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5724
5725         if (btrfs_header_generation(buf) == trans->transid) {
5726                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5727                         ret = check_ref_cleanup(trans, root, buf->start);
5728                         if (!ret)
5729                                 goto out;
5730                 }
5731
5732                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5733                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5734                         goto out;
5735                 }
5736
5737                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5738
5739                 btrfs_add_free_space(cache, buf->start, buf->len);
5740                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5741         }
5742 out:
5743         /*
5744          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5745          * anymore.
5746          */
5747         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5748         btrfs_put_block_group(cache);
5749 }
5750
5751 /* Can return -ENOMEM */
5752 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5753                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5754                       u64 owner, u64 offset, int for_cow)
5755 {
5756         int ret;
5757         struct btrfs_fs_info *fs_info = root->fs_info;
5758
5759         /*
5760          * tree log blocks never actually go into the extent allocation
5761          * tree, just update pinning info and exit early.
5762          */
5763         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5764                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5765                 /* unlocks the pinned mutex */
5766                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5767                 ret = 0;
5768         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5769                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5770                                         num_bytes,
5771                                         parent, root_objectid, (int)owner,
5772                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5773         } else {
5774                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5775                                                 num_bytes,
5776                                                 parent, root_objectid, owner,
5777                                                 offset, BTRFS_DROP_DELAYED_REF,
5778                                                 NULL, for_cow);
5779         }
5780         return ret;
5781 }
5782
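/*
 * Round an allocation start up to the stripe boundary.  The cache and
 * num_bytes arguments are currently unused; presumably they exist so the
 * alignment can become raid-profile aware later.
 */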
5783 static u64 stripe_align(struct btrfs_root *root,
5784                         struct btrfs_block_group_cache *cache,
5785                         u64 val, u64 num_bytes)
5786 {
5787         u64 ret = ALIGN(val, root->stripesize);
5788         return ret;
5789 }
5790
5791 /*
5792  * when we wait for progress in the block group caching, it's because
5793  * our allocation attempt failed at least once.  So, we must sleep
5794  * and let some progress happen before we try again.
5795  *
5796  * This function will sleep at least once waiting for new free space to
5797  * show up, and then it will check the block group free space numbers
5798  * for our min num_bytes.  Another option is to have it go ahead
5799  * and look in the rbtree for a free extent of a given size, but this
5800  * is a good start.
5801  */
5802 static noinline int
5803 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5804                                 u64 num_bytes)
5805 {
5806         struct btrfs_caching_control *caching_ctl;
5807
5808         caching_ctl = get_caching_control(cache);
5809         if (!caching_ctl)
5810                 return 0;
5811
5812         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5813                    (cache->free_space_ctl->free_space >= num_bytes));
5814
5815         put_caching_control(caching_ctl);
5816         return 0;
5817 }
5818
5819 static noinline int
5820 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5821 {
5822         struct btrfs_caching_control *caching_ctl;
5823
5824         caching_ctl = get_caching_control(cache);
5825         if (!caching_ctl)
5826                 return 0;
5827
5828         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5829
5830         put_caching_control(caching_ctl);
5831         return 0;
5832 }
5833
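/*
 * Map block group allocation flags to an index into
 * space_info->block_groups[]; the first matching profile bit wins and
 * SINGLE (no profile bit set) is the fallback.
 */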
5834 int __get_raid_index(u64 flags)
5835 {
5836         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5837                 return BTRFS_RAID_RAID10;
5838         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5839                 return BTRFS_RAID_RAID1;
5840         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5841                 return BTRFS_RAID_DUP;
5842         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5843                 return BTRFS_RAID_RAID0;
5844         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
5845                 return BTRFS_RAID_RAID5;
5846         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
5847                 return BTRFS_RAID_RAID6;
5848
5849         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
5850 }
5851
5852 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5853 {
5854         return __get_raid_index(cache->flags);
5855 }
5856
5857 enum btrfs_loop_type {
5858         LOOP_CACHING_NOWAIT = 0,
5859         LOOP_CACHING_WAIT = 1,
5860         LOOP_ALLOC_CHUNK = 2,
5861         LOOP_NO_EMPTY_SIZE = 3,
5862 };
5863
5864 /*
5865  * walks the btree of allocated extents and finds a hole of a given size.
5866  * The key ins is changed to record the hole:
5867  * ins->objectid == block start
5868  * ins->flags == BTRFS_EXTENT_ITEM_KEY
5869  * ins->offset == number of blocks
5870  * Any available blocks before search_start are skipped.
5871  */
5872 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5873                                      struct btrfs_root *orig_root,
5874                                      u64 num_bytes, u64 empty_size,
5875                                      u64 hint_byte, struct btrfs_key *ins,
5876                                      u64 flags)
5877 {
5878         int ret = 0;
5879         struct btrfs_root *root = orig_root->fs_info->extent_root;
5880         struct btrfs_free_cluster *last_ptr = NULL;
5881         struct btrfs_block_group_cache *block_group = NULL;
5882         struct btrfs_block_group_cache *used_block_group;
5883         u64 search_start = 0;
5884         int empty_cluster = 2 * 1024 * 1024;
5885         struct btrfs_space_info *space_info;
5886         int loop = 0;
5887         int index = __get_raid_index(flags);
5888         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
5889                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5890         bool found_uncached_bg = false;
5891         bool failed_cluster_refill = false;
5892         bool failed_alloc = false;
5893         bool use_cluster = true;
5894         bool have_caching_bg = false;
5895
5896         WARN_ON(num_bytes < root->sectorsize);
5897         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5898         ins->objectid = 0;
5899         ins->offset = 0;
5900
5901         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
5902
5903         space_info = __find_space_info(root->fs_info, flags);
5904         if (!space_info) {
5905                 btrfs_err(root->fs_info, "No space info for %llu", flags);
5906                 return -ENOSPC;
5907         }
5908
5909         /*
5910          * If the space info is for both data and metadata it means we have a
5911          * small filesystem and we can't use the clustering stuff.
5912          */
5913         if (btrfs_mixed_space_info(space_info))
5914                 use_cluster = false;
5915
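        /*
         * Pick the persistent cluster to allocate from.  Metadata always
         * uses clustering, with a smaller 64k empty size on non-SSD
         * media; data allocations only cluster when the SSD option is
         * set and keep the larger 2M default empty size.
         */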
5916         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5917                 last_ptr = &root->fs_info->meta_alloc_cluster;
5918                 if (!btrfs_test_opt(root, SSD))
5919                         empty_cluster = 64 * 1024;
5920         }
5921
5922         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5923             btrfs_test_opt(root, SSD)) {
5924                 last_ptr = &root->fs_info->data_alloc_cluster;
5925         }
5926
5927         if (last_ptr) {
5928                 spin_lock(&last_ptr->lock);
5929                 if (last_ptr->block_group)
5930                         hint_byte = last_ptr->window_start;
5931                 spin_unlock(&last_ptr->lock);
5932         }
5933
5934         search_start = max(search_start, first_logical_byte(root, 0));
5935         search_start = max(search_start, hint_byte);
5936
5937         if (!last_ptr)
5938                 empty_cluster = 0;
5939
5940         if (search_start == hint_byte) {
5941                 block_group = btrfs_lookup_block_group(root->fs_info,
5942                                                        search_start);
5943                 used_block_group = block_group;
5944                 /*
5945                  * we don't want to use the block group if it doesn't match our
5946                  * allocation bits, or if it's not cached.
5947                  *
5948                  * However if we are re-searching with an ideal block group
5949                  * picked out then we don't care that the block group is cached.
5950                  */
5951                 if (block_group && block_group_bits(block_group, flags) &&
5952                     block_group->cached != BTRFS_CACHE_NO) {
5953                         down_read(&space_info->groups_sem);
5954                         if (list_empty(&block_group->list) ||
5955                             block_group->ro) {
5956                                 /*
5957                                  * someone is removing this block group,
5958                                  * we can't jump to the have_block_group
5959                                  * label because our list pointers are no
5960                                  * longer valid
5961                                  */
5962                                 btrfs_put_block_group(block_group);
5963                                 up_read(&space_info->groups_sem);
5964                         } else {
5965                                 index = get_block_group_index(block_group);
5966                                 goto have_block_group;
5967                         }
5968                 } else if (block_group) {
5969                         btrfs_put_block_group(block_group);
5970                 }
5971         }
5972 search:
5973         have_caching_bg = false;
5974         down_read(&space_info->groups_sem);
5975         list_for_each_entry(block_group, &space_info->block_groups[index],
5976                             list) {
5977                 u64 offset;
5978                 int cached;
5979
5980                 used_block_group = block_group;
5981                 btrfs_get_block_group(block_group);
5982                 search_start = block_group->key.objectid;
5983
5984                 /*
5985                  * this can happen if we end up cycling through all the
5986                  * raid types, but we want to make sure we only allocate
5987                  * for the proper type.
5988                  */
5989                 if (!block_group_bits(block_group, flags)) {
5990                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
5991                                     BTRFS_BLOCK_GROUP_RAID1 |
5992                                     BTRFS_BLOCK_GROUP_RAID5 |
5993                                     BTRFS_BLOCK_GROUP_RAID6 |
5994                                     BTRFS_BLOCK_GROUP_RAID10;
5995
5996                         /*
5997                          * if they asked for extra copies and this block group
5998                          * doesn't provide them, bail.  This does allow us to
5999                          * fill raid0 from raid1.
6000                          */
6001                         if ((flags & extra) && !(block_group->flags & extra))
6002                                 goto loop;
6003                 }
6004
6005 have_block_group:
6006                 cached = block_group_cache_done(block_group);
6007                 if (unlikely(!cached)) {
6008                         found_uncached_bg = true;
6009                         ret = cache_block_group(block_group, 0);
6010                         BUG_ON(ret < 0);
6011                         ret = 0;
6012                 }
6013
6014                 if (unlikely(block_group->ro))
6015                         goto loop;
6016
6017                 /*
6018                  * OK, we want to try to use the cluster allocator, so
6019                  * let's look there
6020                  */
6021                 if (last_ptr) {
6022                         unsigned long aligned_cluster;
6023                         /*
6024                          * the refill lock keeps out other
6025                          * people trying to start a new cluster
6026                          */
6027                         spin_lock(&last_ptr->refill_lock);
6028                         used_block_group = last_ptr->block_group;
6029                         if (used_block_group != block_group &&
6030                             (!used_block_group ||
6031                              used_block_group->ro ||
6032                              !block_group_bits(used_block_group, flags))) {
6033                                 used_block_group = block_group;
6034                                 goto refill_cluster;
6035                         }
6036
6037                         if (used_block_group != block_group)
6038                                 btrfs_get_block_group(used_block_group);
6039
6040                         offset = btrfs_alloc_from_cluster(used_block_group,
6041                           last_ptr, num_bytes, used_block_group->key.objectid);
6042                         if (offset) {
6043                                 /* we have a block, we're done */
6044                                 spin_unlock(&last_ptr->refill_lock);
6045                                 trace_btrfs_reserve_extent_cluster(root,
6046                                         block_group, search_start, num_bytes);
6047                                 goto checks;
6048                         }
6049
6050                         WARN_ON(last_ptr->block_group != used_block_group);
6051                         if (used_block_group != block_group) {
6052                                 btrfs_put_block_group(used_block_group);
6053                                 used_block_group = block_group;
6054                         }
6055 refill_cluster:
6056                         BUG_ON(used_block_group != block_group);
6057                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6058                          * set up a new cluster, so let's just skip it
6059                          * and let the allocator find whatever block
6060                          * it can find.  If we reach this point, we
6061                          * will have tried the cluster allocator
6062                          * plenty of times and not have found
6063                          * anything, so we are likely way too
6064                          * fragmented for the clustering stuff to find
6065                          * anything.
6066                          *
6067                          * However, if the cluster is taken from the
6068                          * current block group, release the cluster
6069                          * first, so that we stand a better chance of
6070                          * succeeding in the unclustered
6071                          * allocation.  */
6072                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6073                             last_ptr->block_group != block_group) {
6074                                 spin_unlock(&last_ptr->refill_lock);
6075                                 goto unclustered_alloc;
6076                         }
6077
6078                         /*
6079                          * this cluster didn't work out, free it and
6080                          * start over
6081                          */
6082                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6083
6084                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6085                                 spin_unlock(&last_ptr->refill_lock);
6086                                 goto unclustered_alloc;
6087                         }
6088
6089                         aligned_cluster = max_t(unsigned long,
6090                                                 empty_cluster + empty_size,
6091                                               block_group->full_stripe_len);
6092
6093                         /* allocate a cluster in this block group */
6094                         ret = btrfs_find_space_cluster(trans, root,
6095                                                block_group, last_ptr,
6096                                                search_start, num_bytes,
6097                                                aligned_cluster);
6098                         if (ret == 0) {
6099                                 /*
6100                                  * now pull our allocation out of this
6101                                  * cluster
6102                                  */
6103                                 offset = btrfs_alloc_from_cluster(block_group,
6104                                                   last_ptr, num_bytes,
6105                                                   search_start);
6106                                 if (offset) {
6107                                         /* we found one, proceed */
6108                                         spin_unlock(&last_ptr->refill_lock);
6109                                         trace_btrfs_reserve_extent_cluster(root,
6110                                                 block_group, search_start,
6111                                                 num_bytes);
6112                                         goto checks;
6113                                 }
6114                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
6115                                    !failed_cluster_refill) {
6116                                 spin_unlock(&last_ptr->refill_lock);
6117
6118                                 failed_cluster_refill = true;
6119                                 wait_block_group_cache_progress(block_group,
6120                                        num_bytes + empty_cluster + empty_size);
6121                                 goto have_block_group;
6122                         }
6123
6124                         /*
6125                          * at this point we either didn't find a cluster
6126                          * or we weren't able to allocate a block from our
6127                          * cluster.  Free the cluster we've been trying
6128                          * to use, and go to the next block group
6129                          */
6130                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6131                         spin_unlock(&last_ptr->refill_lock);
6132                         goto loop;
6133                 }
6134
6135 unclustered_alloc:
6136                 spin_lock(&block_group->free_space_ctl->tree_lock);
6137                 if (cached &&
6138                     block_group->free_space_ctl->free_space <
6139                     num_bytes + empty_cluster + empty_size) {
6140                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6141                         goto loop;
6142                 }
6143                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6144
6145                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6146                                                     num_bytes, empty_size);
6147                 /*
6148                  * If we didn't find a chunk, and we haven't failed on this
6149                  * block group before, and this block group is in the middle of
6150                  * caching and we are ok with waiting, then go ahead and wait
6151                  * for progress to be made, and set failed_alloc to true.
6152                  *
6153                  * If failed_alloc is true then we've already waited on this
6154                  * block group once and should move on to the next block group.
6155                  */
6156                 if (!offset && !failed_alloc && !cached &&
6157                     loop > LOOP_CACHING_NOWAIT) {
6158                         wait_block_group_cache_progress(block_group,
6159                                                 num_bytes + empty_size);
6160                         failed_alloc = true;
6161                         goto have_block_group;
6162                 } else if (!offset) {
6163                         if (!cached)
6164                                 have_caching_bg = true;
6165                         goto loop;
6166                 }
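                /*
                 * We have a candidate offset: align it to the stripe
                 * size and make sure the aligned extent still fits
                 * inside this block group before reserving it.
                 */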
6167 checks:
6168                 search_start = stripe_align(root, used_block_group,
6169                                             offset, num_bytes);
6170
6171                 /* move on to the next group */
6172                 if (search_start + num_bytes >
6173                     used_block_group->key.objectid + used_block_group->key.offset) {
6174                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6175                         goto loop;
6176                 }
6177
6178                 if (offset < search_start)
6179                         btrfs_add_free_space(used_block_group, offset,
6180                                              search_start - offset);
6181                 BUG_ON(offset > search_start);
6182
6183                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6184                                                   alloc_type);
6185                 if (ret == -EAGAIN) {
6186                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6187                         goto loop;
6188                 }
6189
6190                 /* we are all good, let's return */
6191                 ins->objectid = search_start;
6192                 ins->offset = num_bytes;
6193
6194                 trace_btrfs_reserve_extent(orig_root, block_group,
6195                                            search_start, num_bytes);
6196                 if (used_block_group != block_group)
6197                         btrfs_put_block_group(used_block_group);
6198                 btrfs_put_block_group(block_group);
6199                 break;
6200 loop:
6201                 failed_cluster_refill = false;
6202                 failed_alloc = false;
6203                 BUG_ON(index != get_block_group_index(block_group));
6204                 if (used_block_group != block_group)
6205                         btrfs_put_block_group(used_block_group);
6206                 btrfs_put_block_group(block_group);
6207         }
6208         up_read(&space_info->groups_sem);
6209
6210         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6211                 goto search;
6212
6213         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6214                 goto search;
6215
6216         /*
6217          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6218          *                      caching kthreads as we move along
6219          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6220          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6221          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6222          *                      again
6223          */
6224         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6225                 index = 0;
6226                 loop++;
6227                 if (loop == LOOP_ALLOC_CHUNK) {
6228                         ret = do_chunk_alloc(trans, root, flags,
6229                                              CHUNK_ALLOC_FORCE);
6230                         /*
6231                          * Do not bail out on ENOSPC since we
6232                          * can still fall back to LOOP_NO_EMPTY_SIZE.
6233                          */
6234                         if (ret < 0 && ret != -ENOSPC) {
6235                                 btrfs_abort_transaction(trans,
6236                                                         root, ret);
6237                                 goto out;
6238                         }
6239                 }
6240
6241                 if (loop == LOOP_NO_EMPTY_SIZE) {
6242                         empty_size = 0;
6243                         empty_cluster = 0;
6244                 }
6245
6246                 goto search;
6247         } else if (!ins->objectid) {
6248                 ret = -ENOSPC;
6249         } else {
6250                 ret = 0;
6251         }
6252 out:
6253
6254         return ret;
6255 }
6256
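/*
 * Dump the accounting of a space_info, and optionally of each of its
 * block groups, to the kernel log; used on the ENOSPC debugging paths.
 */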
6257 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6258                             int dump_block_groups)
6259 {
6260         struct btrfs_block_group_cache *cache;
6261         int index = 0;
6262
6263         spin_lock(&info->lock);
6264         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6265                (unsigned long long)info->flags,
6266                (unsigned long long)(info->total_bytes - info->bytes_used -
6267                                     info->bytes_pinned - info->bytes_reserved -
6268                                     info->bytes_readonly),
6269                (info->full) ? "" : "not ");
6270         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6271                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6272                (unsigned long long)info->total_bytes,
6273                (unsigned long long)info->bytes_used,
6274                (unsigned long long)info->bytes_pinned,
6275                (unsigned long long)info->bytes_reserved,
6276                (unsigned long long)info->bytes_may_use,
6277                (unsigned long long)info->bytes_readonly);
6278         spin_unlock(&info->lock);
6279
6280         if (!dump_block_groups)
6281                 return;
6282
6283         down_read(&info->groups_sem);
6284 again:
6285         list_for_each_entry(cache, &info->block_groups[index], list) {
6286                 spin_lock(&cache->lock);
6287                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6288                        (unsigned long long)cache->key.objectid,
6289                        (unsigned long long)cache->key.offset,
6290                        (unsigned long long)btrfs_block_group_used(&cache->item),
6291                        (unsigned long long)cache->pinned,
6292                        (unsigned long long)cache->reserved,
6293                        cache->ro ? "[readonly]" : "");
6294                 btrfs_dump_free_space(cache, bytes);
6295                 spin_unlock(&cache->lock);
6296         }
6297         if (++index < BTRFS_NR_RAID_TYPES)
6298                 goto again;
6299         up_read(&info->groups_sem);
6300 }
6301
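/*
 * Reserve an extent of at least min_alloc_size bytes.  On ENOSPC the
 * request is halved (rounded down to a sector boundary) until it reaches
 * min_alloc_size, which is tried exactly once before giving up.
 */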
6302 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6303                          struct btrfs_root *root,
6304                          u64 num_bytes, u64 min_alloc_size,
6305                          u64 empty_size, u64 hint_byte,
6306                          struct btrfs_key *ins, int is_data)
6307 {
6308         bool final_tried = false;
6309         u64 flags;
6310         int ret;
6311
6312         flags = btrfs_get_alloc_profile(root, is_data);
6313 again:
6314         WARN_ON(num_bytes < root->sectorsize);
6315         ret = find_free_extent(trans, root, num_bytes, empty_size,
6316                                hint_byte, ins, flags);
6317
6318         if (ret == -ENOSPC) {
6319                 if (!final_tried) {
6320                         num_bytes = num_bytes >> 1;
6321                         num_bytes = round_down(num_bytes, root->sectorsize);
6322                         num_bytes = max(num_bytes, min_alloc_size);
6323                         if (num_bytes == min_alloc_size)
6324                                 final_tried = true;
6325                         goto again;
6326                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6327                         struct btrfs_space_info *sinfo;
6328
6329                         sinfo = __find_space_info(root->fs_info, flags);
6330                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6331                                 (unsigned long long)flags,
6332                                 (unsigned long long)num_bytes);
6333                         if (sinfo)
6334                                 dump_space_info(sinfo, num_bytes, 1);
6335                 }
6336         }
6337
6338         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6339
6340         return ret;
6341 }
6342
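/*
 * Give back an extent that was reserved but never ended up being used.
 * The range is optionally discarded and then either pinned until the
 * transaction commits (pin != 0) or returned directly to the free space
 * cache.
 */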
6343 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6344                                         u64 start, u64 len, int pin)
6345 {
6346         struct btrfs_block_group_cache *cache;
6347         int ret = 0;
6348
6349         cache = btrfs_lookup_block_group(root->fs_info, start);
6350         if (!cache) {
6351                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6352                         (unsigned long long)start);
6353                 return -ENOSPC;
6354         }
6355
6356         if (btrfs_test_opt(root, DISCARD))
6357                 ret = btrfs_discard_extent(root, start, len, NULL);
6358
6359         if (pin)
6360                 pin_down_extent(root, cache, start, len, 1);
6361         else {
6362                 btrfs_add_free_space(cache, start, len);
6363                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6364         }
6365         btrfs_put_block_group(cache);
6366
6367         trace_btrfs_reserved_extent_free(root, start, len);
6368
6369         return ret;
6370 }
6371
6372 int btrfs_free_reserved_extent(struct btrfs_root *root,
6373                                         u64 start, u64 len)
6374 {
6375         return __btrfs_free_reserved_extent(root, start, len, 0);
6376 }
6377
6378 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6379                                        u64 start, u64 len)
6380 {
6381         return __btrfs_free_reserved_extent(root, start, len, 1);
6382 }
6383
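/*
 * Insert the extent item for a newly allocated data extent together with
 * a single inline backref: a shared data ref keyed on the parent block
 * when parent is set, otherwise a normal extent data ref.  The block
 * group accounting is updated afterwards.
 */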
6384 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6385                                       struct btrfs_root *root,
6386                                       u64 parent, u64 root_objectid,
6387                                       u64 flags, u64 owner, u64 offset,
6388                                       struct btrfs_key *ins, int ref_mod)
6389 {
6390         int ret;
6391         struct btrfs_fs_info *fs_info = root->fs_info;
6392         struct btrfs_extent_item *extent_item;
6393         struct btrfs_extent_inline_ref *iref;
6394         struct btrfs_path *path;
6395         struct extent_buffer *leaf;
6396         int type;
6397         u32 size;
6398
6399         if (parent > 0)
6400                 type = BTRFS_SHARED_DATA_REF_KEY;
6401         else
6402                 type = BTRFS_EXTENT_DATA_REF_KEY;
6403
6404         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6405
6406         path = btrfs_alloc_path();
6407         if (!path)
6408                 return -ENOMEM;
6409
6410         path->leave_spinning = 1;
6411         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6412                                       ins, size);
6413         if (ret) {
6414                 btrfs_free_path(path);
6415                 return ret;
6416         }
6417
6418         leaf = path->nodes[0];
6419         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6420                                      struct btrfs_extent_item);
6421         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6422         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6423         btrfs_set_extent_flags(leaf, extent_item,
6424                                flags | BTRFS_EXTENT_FLAG_DATA);
6425
6426         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6427         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6428         if (parent > 0) {
6429                 struct btrfs_shared_data_ref *ref;
6430                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6431                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6432                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6433         } else {
6434                 struct btrfs_extent_data_ref *ref;
6435                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6436                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6437                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6438                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6439                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6440         }
6441
6442         btrfs_mark_buffer_dirty(path->nodes[0]);
6443         btrfs_free_path(path);
6444
6445         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6446         if (ret) { /* -ENOENT, logic error */
6447                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6448                         (unsigned long long)ins->objectid,
6449                         (unsigned long long)ins->offset);
6450                 BUG();
6451         }
6452         return ret;
6453 }
6454
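/*
 * Tree block counterpart of alloc_reserved_file_extent().  With the
 * skinny-metadata incompat flag the level travels in the key offset and
 * no btrfs_tree_block_info is embedded, so the item is just
 * sizeof(extent_item) + sizeof(iref) bytes; otherwise the key and level
 * are stored in a tree_block_info between the extent item and the
 * inline ref.
 */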
6455 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6456                                      struct btrfs_root *root,
6457                                      u64 parent, u64 root_objectid,
6458                                      u64 flags, struct btrfs_disk_key *key,
6459                                      int level, struct btrfs_key *ins)
6460 {
6461         int ret;
6462         struct btrfs_fs_info *fs_info = root->fs_info;
6463         struct btrfs_extent_item *extent_item;
6464         struct btrfs_tree_block_info *block_info;
6465         struct btrfs_extent_inline_ref *iref;
6466         struct btrfs_path *path;
6467         struct extent_buffer *leaf;
6468         u32 size = sizeof(*extent_item) + sizeof(*iref);
6469         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6470                                                  SKINNY_METADATA);
6471
6472         if (!skinny_metadata)
6473                 size += sizeof(*block_info);
6474
6475         path = btrfs_alloc_path();
6476         if (!path)
6477                 return -ENOMEM;
6478
6479         path->leave_spinning = 1;
6480         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6481                                       ins, size);
6482         if (ret) {
6483                 btrfs_free_path(path);
6484                 return ret;
6485         }
6486
6487         leaf = path->nodes[0];
6488         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6489                                      struct btrfs_extent_item);
6490         btrfs_set_extent_refs(leaf, extent_item, 1);
6491         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6492         btrfs_set_extent_flags(leaf, extent_item,
6493                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6494
6495         if (skinny_metadata) {
6496                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6497         } else {
6498                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6499                 btrfs_set_tree_block_key(leaf, block_info, key);
6500                 btrfs_set_tree_block_level(leaf, block_info, level);
6501                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6502         }
6503
6504         if (parent > 0) {
6505                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6506                 btrfs_set_extent_inline_ref_type(leaf, iref,
6507                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6508                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6509         } else {
6510                 btrfs_set_extent_inline_ref_type(leaf, iref,
6511                                                  BTRFS_TREE_BLOCK_REF_KEY);
6512                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6513         }
6514
6515         btrfs_mark_buffer_dirty(leaf);
6516         btrfs_free_path(path);
6517
6518         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6519         if (ret) { /* -ENOENT, logic error */
6520                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6521                         (unsigned long long)ins->objectid,
6522                         (unsigned long long)ins->offset);
6523                 BUG();
6524         }
6525         return ret;
6526 }
6527
6528 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6529                                      struct btrfs_root *root,
6530                                      u64 root_objectid, u64 owner,
6531                                      u64 offset, struct btrfs_key *ins)
6532 {
6533         int ret;
6534
6535         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6536
6537         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6538                                          ins->offset, 0,
6539                                          root_objectid, owner, offset,
6540                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6541         return ret;
6542 }
6543
6544 /*
6545  * this is used by the tree logging recovery code.  It records that
6546  * an extent has been allocated and makes sure to remove it from the
6547  * free space cache as well
6548  */
6549 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6550                                    struct btrfs_root *root,
6551                                    u64 root_objectid, u64 owner, u64 offset,
6552                                    struct btrfs_key *ins)
6553 {
6554         int ret;
6555         struct btrfs_block_group_cache *block_group;
6556         struct btrfs_caching_control *caching_ctl;
6557         u64 start = ins->objectid;
6558         u64 num_bytes = ins->offset;
6559
6560         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6561         cache_block_group(block_group, 0);
6562         caching_ctl = get_caching_control(block_group);
6563
6564         if (!caching_ctl) {
6565                 BUG_ON(!block_group_cache_done(block_group));
6566                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6567                 if (ret)
6568                         goto out;
6569         } else {
6570                 mutex_lock(&caching_ctl->mutex);
6571
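                /*
                 * The block group is still caching: ranges behind
                 * caching_ctl->progress are already in the free space
                 * cache and must be removed from it, while ranges ahead
                 * of it must be excluded so the caching thread doesn't
                 * add them as free later.  An extent straddling progress
                 * gets both treatments.
                 */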
6572                 if (start >= caching_ctl->progress) {
6573                         ret = add_excluded_extent(root, start, num_bytes);
6574                 } else if (start + num_bytes <= caching_ctl->progress) {
6575                         ret = btrfs_remove_free_space(block_group,
6576                                                       start, num_bytes);
6577                 } else {
6578                         num_bytes = caching_ctl->progress - start;
6579                         ret = btrfs_remove_free_space(block_group,
6580                                                       start, num_bytes);
6581                         if (ret)
6582                                 goto out_lock;
6583
6584                         start = caching_ctl->progress;
6585                         num_bytes = ins->objectid + ins->offset -
6586                                     caching_ctl->progress;
6587                         ret = add_excluded_extent(root, start, num_bytes);
6588                 }
6589 out_lock:
6590                 mutex_unlock(&caching_ctl->mutex);
6591                 put_caching_control(caching_ctl);
6592                 if (ret)
6593                         goto out;
6594         }
6595
6596         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6597                                           RESERVE_ALLOC_NO_ACCOUNT);
6598         BUG_ON(ret); /* logic error */
6599         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6600                                          0, owner, offset, ins, 1);
6601 out:
6602         btrfs_put_block_group(block_group);
6603         return ret;
6604 }
6605
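/*
 * Set up a freshly allocated tree block: lock it, clean any stale
 * contents, stamp the current transid and mark it dirty in the right
 * extent io tree: dirty_log_pages for log trees (alternating between
 * the DIRTY and NEW bits per log transaction), or the transaction's
 * dirty_pages otherwise.
 */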
6606 static struct extent_buffer *
6607 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6608                       u64 bytenr, u32 blocksize, int level)
6609 {
6610         struct extent_buffer *buf;
6611
6612         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6613         if (!buf)
6614                 return ERR_PTR(-ENOMEM);
6615         btrfs_set_header_generation(buf, trans->transid);
6616         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6617         btrfs_tree_lock(buf);
6618         clean_tree_block(trans, root, buf);
6619         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6620
6621         btrfs_set_lock_blocking(buf);
6622         btrfs_set_buffer_uptodate(buf);
6623
6624         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6625                 /*
6626                  * we allow two log transactions at a time, use different
6627                  * EXTENT bits to differentiate dirty pages.
6628                  */
6629                 if (root->log_transid % 2 == 0)
6630                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6631                                         buf->start + buf->len - 1, GFP_NOFS);
6632                 else
6633                         set_extent_new(&root->dirty_log_pages, buf->start,
6634                                         buf->start + buf->len - 1, GFP_NOFS);
6635         } else {
6636                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6637                          buf->start + buf->len - 1, GFP_NOFS);
6638         }
6639         trans->blocks_used++;
6640         /* this returns a buffer locked for blocking */
6641         return buf;
6642 }
6643
6644 static struct btrfs_block_rsv *
6645 use_block_rsv(struct btrfs_trans_handle *trans,
6646               struct btrfs_root *root, u32 blocksize)
6647 {
6648         struct btrfs_block_rsv *block_rsv;
6649         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6650         int ret;
6651
6652         block_rsv = get_block_rsv(trans, root);
6653
6654         if (block_rsv->size == 0) {
6655                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6656                                              BTRFS_RESERVE_NO_FLUSH);
6657                 /*
6658                  * If we couldn't reserve metadata bytes, try to use some
6659                  * from the global reserve.
6660                  */
6661                 if (ret && block_rsv != global_rsv) {
6662                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6663                         if (!ret)
6664                                 return global_rsv;
6665                         return ERR_PTR(ret);
6666                 } else if (ret) {
6667                         return ERR_PTR(ret);
6668                 }
6669                 return block_rsv;
6670         }
6671
6672         ret = block_rsv_use_bytes(block_rsv, blocksize);
6673         if (!ret)
6674                 return block_rsv;
6675         if (ret && !block_rsv->failfast) {
6676                 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6677                         static DEFINE_RATELIMIT_STATE(_rs,
6678                                         DEFAULT_RATELIMIT_INTERVAL * 10,
6679                                         /*DEFAULT_RATELIMIT_BURST*/ 1);
6680                         if (__ratelimit(&_rs))
6681                                 WARN(1, KERN_DEBUG
6682                                         "btrfs: block rsv returned %d\n", ret);
6683                 }
6684                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6685                                              BTRFS_RESERVE_NO_FLUSH);
6686                 if (!ret) {
6687                         return block_rsv;
6688                 } else if (ret && block_rsv != global_rsv) {
6689                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6690                         if (!ret)
6691                                 return global_rsv;
6692                 }
6693         }
6694
6695         return ERR_PTR(-ENOSPC);
6696 }
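
/*
 * In short, use_block_rsv() tries three sources in order: bytes already
 * sitting in the root's block rsv, a fresh no-flush reservation, and
 * finally the global reserve.  A rough sketch of the decision chain
 * (illustrative only, warning and error paths trimmed):
 *
 *	if (block_rsv_use_bytes(block_rsv, blocksize) == 0)
 *		return block_rsv;
 *	if (reserve_metadata_bytes(root, block_rsv, blocksize,
 *				   BTRFS_RESERVE_NO_FLUSH) == 0)
 *		return block_rsv;
 *	if (block_rsv_use_bytes(global_rsv, blocksize) == 0)
 *		return global_rsv;
 *	return ERR_PTR(-ENOSPC);
 */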
6697
6698 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6699                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6700 {
6701         block_rsv_add_bytes(block_rsv, blocksize, 0);
6702         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6703 }
6704
6705 /*
6706  * finds a free extent and does all the dirty work required for
6707  * allocation: it reserves the extent, initializes the tree buffer and
6708  * queues a delayed ref that will insert the extent item.
6709  *
6710  * returns a locked tree buffer on success or an ERR_PTR on failure.
6711  */
6712 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6713                                         struct btrfs_root *root, u32 blocksize,
6714                                         u64 parent, u64 root_objectid,
6715                                         struct btrfs_disk_key *key, int level,
6716                                         u64 hint, u64 empty_size)
6717 {
6718         struct btrfs_key ins;
6719         struct btrfs_block_rsv *block_rsv;
6720         struct extent_buffer *buf;
6721         u64 flags = 0;
6722         int ret;
6723         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6724                                                  SKINNY_METADATA);
6725
6726         block_rsv = use_block_rsv(trans, root, blocksize);
6727         if (IS_ERR(block_rsv))
6728                 return ERR_CAST(block_rsv);
6729
6730         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6731                                    empty_size, hint, &ins, 0);
6732         if (ret) {
6733                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6734                 return ERR_PTR(ret);
6735         }
6736
6737         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6738                                     blocksize, level);
6739         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6740
6741         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6742                 if (parent == 0)
6743                         parent = ins.objectid;
6744                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6745         } else
6746                 BUG_ON(parent > 0);
6747
6748         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6749                 struct btrfs_delayed_extent_op *extent_op;
6750                 extent_op = btrfs_alloc_delayed_extent_op();
6751                 BUG_ON(!extent_op); /* -ENOMEM */
6752                 if (key)
6753                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6754                 else
6755                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6756                 extent_op->flags_to_set = flags;
6757                 if (skinny_metadata)
6758                         extent_op->update_key = 0;
6759                 else
6760                         extent_op->update_key = 1;
6761                 extent_op->update_flags = 1;
6762                 extent_op->is_data = 0;
6763                 extent_op->level = level;
6764
6765                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6766                                         ins.objectid,
6767                                         ins.offset, parent, root_objectid,
6768                                         level, BTRFS_ADD_DELAYED_EXTENT,
6769                                         extent_op, 0);
6770                 BUG_ON(ret); /* -ENOMEM */
6771         }
6772         return buf;
6773 }
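
/*
 * A note on the skinny metadata branch above: with the SKINNY_METADATA
 * incompat flag, tree block extents are keyed as
 *
 *	(bytenr, BTRFS_METADATA_ITEM_KEY, level)
 *
 * and the extent item omits the btrfs_tree_block_info that would
 * otherwise record the block's first key, so there is no key on disk to
 * update and extent_op->update_key is left clear.
 */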
6774
6775 struct walk_control {
6776         u64 refs[BTRFS_MAX_LEVEL];
6777         u64 flags[BTRFS_MAX_LEVEL];
6778         struct btrfs_key update_progress;
6779         int stage;
6780         int level;
6781         int shared_level;
6782         int update_ref;
6783         int keep_locks;
6784         int reada_slot;
6785         int reada_count;
6786         int for_reloc;
6787 };
6788
6789 #define DROP_REFERENCE  1
6790 #define UPDATE_BACKREF  2
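
/*
 * the walk through a dead tree is done in two stages.  DROP_REFERENCE
 * walks down dropping one reference from every block it visits, freeing
 * blocks that are only referenced by this tree.  when a shared block
 * whose backrefs still need updating is found, the walk switches to
 * UPDATE_BACKREF for the subtree below that block (tracked via
 * wc->shared_level) and switches back once it climbs out of it.
 */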
6791
6792 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6793                                      struct btrfs_root *root,
6794                                      struct walk_control *wc,
6795                                      struct btrfs_path *path)
6796 {
6797         u64 bytenr;
6798         u64 generation;
6799         u64 refs;
6800         u64 flags;
6801         u32 nritems;
6802         u32 blocksize;
6803         struct btrfs_key key;
6804         struct extent_buffer *eb;
6805         int ret;
6806         int slot;
6807         int nread = 0;
6808
6809         if (path->slots[wc->level] < wc->reada_slot) {
6810                 wc->reada_count = wc->reada_count * 2 / 3;
6811                 wc->reada_count = max(wc->reada_count, 2);
6812         } else {
6813                 wc->reada_count = wc->reada_count * 3 / 2;
6814                 wc->reada_count = min_t(int, wc->reada_count,
6815                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6816         }
6817
6818         eb = path->nodes[wc->level];
6819         nritems = btrfs_header_nritems(eb);
6820         blocksize = btrfs_level_size(root, wc->level - 1);
6821
6822         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6823                 if (nread >= wc->reada_count)
6824                         break;
6825
6826                 cond_resched();
6827                 bytenr = btrfs_node_blockptr(eb, slot);
6828                 generation = btrfs_node_ptr_generation(eb, slot);
6829
6830                 if (slot == path->slots[wc->level])
6831                         goto reada;
6832
6833                 if (wc->stage == UPDATE_BACKREF &&
6834                     generation <= root->root_key.offset)
6835                         continue;
6836
6837                 /* We don't lock the tree block, it's OK to be racy here */
6838                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
6839                                                wc->level - 1, 1, &refs,
6840                                                &flags);
6841                 /* We don't care about errors in readahead. */
6842                 if (ret < 0)
6843                         continue;
6844                 BUG_ON(refs == 0);
6845
6846                 if (wc->stage == DROP_REFERENCE) {
6847                         if (refs == 1)
6848                                 goto reada;
6849
6850                         if (wc->level == 1 &&
6851                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6852                                 continue;
6853                         if (!wc->update_ref ||
6854                             generation <= root->root_key.offset)
6855                                 continue;
6856                         btrfs_node_key_to_cpu(eb, &key, slot);
6857                         ret = btrfs_comp_cpu_keys(&key,
6858                                                   &wc->update_progress);
6859                         if (ret < 0)
6860                                 continue;
6861                 } else {
6862                         if (wc->level == 1 &&
6863                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6864                                 continue;
6865                 }
6866 reada:
6867                 ret = readahead_tree_block(root, bytenr, blocksize,
6868                                            generation);
6869                 if (ret)
6870                         break;
6871                 nread++;
6872         }
6873         wc->reada_slot = slot;
6874 }
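
/*
 * the window sizing in reada_walk_down() is adaptive: while the walk is
 * still inside the previously read-ahead region (slot < reada_slot) the
 * window shrinks to 2/3 of its size, with a floor of 2; once the walk
 * catches up it grows by 3/2, capped at a full node's worth of pointers.
 */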
6875
6876 /*
6877  * helper to process tree block while walking down the tree.
6878  *
6879  * when wc->stage == UPDATE_BACKREF, this function updates
6880  * back refs for pointers in the block.
6881  *
6882  * NOTE: return value 1 means we should stop walking down.
6883  */
6884 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6885                                    struct btrfs_root *root,
6886                                    struct btrfs_path *path,
6887                                    struct walk_control *wc, int lookup_info)
6888 {
6889         int level = wc->level;
6890         struct extent_buffer *eb = path->nodes[level];
6891         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6892         int ret;
6893
6894         if (wc->stage == UPDATE_BACKREF &&
6895             btrfs_header_owner(eb) != root->root_key.objectid)
6896                 return 1;
6897
6898         /*
6899          * when the reference count of a tree block is 1, it won't increase
6900          * again. once the full backref flag is set, we never clear it.
6901          */
6902         if (lookup_info &&
6903             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6904              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6905                 BUG_ON(!path->locks[level]);
6906                 ret = btrfs_lookup_extent_info(trans, root,
6907                                                eb->start, level, 1,
6908                                                &wc->refs[level],
6909                                                &wc->flags[level]);
6910                 BUG_ON(ret == -ENOMEM);
6911                 if (ret)
6912                         return ret;
6913                 BUG_ON(wc->refs[level] == 0);
6914         }
6915
6916         if (wc->stage == DROP_REFERENCE) {
6917                 if (wc->refs[level] > 1)
6918                         return 1;
6919
6920                 if (path->locks[level] && !wc->keep_locks) {
6921                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6922                         path->locks[level] = 0;
6923                 }
6924                 return 0;
6925         }
6926
6927         /* wc->stage == UPDATE_BACKREF */
6928         if (!(wc->flags[level] & flag)) {
6929                 BUG_ON(!path->locks[level]);
6930                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6931                 BUG_ON(ret); /* -ENOMEM */
6932                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6933                 BUG_ON(ret); /* -ENOMEM */
6934                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6935                                                   eb->len, flag,
6936                                                   btrfs_header_level(eb), 0);
6937                 BUG_ON(ret); /* -ENOMEM */
6938                 wc->flags[level] |= flag;
6939         }
6940
6941         /*
6942          * the block is shared by multiple trees, so it's not good to
6943          * keep the tree lock
6944          */
6945         if (path->locks[level] && level > 0) {
6946                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6947                 path->locks[level] = 0;
6948         }
6949         return 0;
6950 }
6951
6952 /*
6953  * helper to process tree block pointer.
6954  *
6955  * when wc->stage == DROP_REFERENCE, this function checks the
6956  * reference count of the block pointed to. if the block
6957  * is shared and we need to update back refs for the subtree
6958  * rooted at the block, this function changes wc->stage to
6959  * UPDATE_BACKREF. if the block is shared and there is no
6960  * need to update back refs, this function drops its reference
6961  * to the block.
6962  *
6963  * NOTE: return value 1 means we should stop walking down.
6964  */
6965 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6966                                  struct btrfs_root *root,
6967                                  struct btrfs_path *path,
6968                                  struct walk_control *wc, int *lookup_info)
6969 {
6970         u64 bytenr;
6971         u64 generation;
6972         u64 parent;
6973         u32 blocksize;
6974         struct btrfs_key key;
6975         struct extent_buffer *next;
6976         int level = wc->level;
6977         int reada = 0;
6978         int ret = 0;
6979
6980         generation = btrfs_node_ptr_generation(path->nodes[level],
6981                                                path->slots[level]);
6982         /*
6983          * if the lower level block was created before the snapshot
6984          * was created, we know there is no need to update back refs
6985          * for the subtree
6986          */
6987         if (wc->stage == UPDATE_BACKREF &&
6988             generation <= root->root_key.offset) {
6989                 *lookup_info = 1;
6990                 return 1;
6991         }
6992
6993         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6994         blocksize = btrfs_level_size(root, level - 1);
6995
6996         next = btrfs_find_tree_block(root, bytenr, blocksize);
6997         if (!next) {
6998                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6999                 if (!next)
7000                         return -ENOMEM;
7001                 reada = 1;
7002         }
7003         btrfs_tree_lock(next);
7004         btrfs_set_lock_blocking(next);
7005
7006         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7007                                        &wc->refs[level - 1],
7008                                        &wc->flags[level - 1]);
7009         if (ret < 0) {
7010                 btrfs_tree_unlock(next);
7011                 return ret;
7012         }
7013
7014         if (unlikely(wc->refs[level - 1] == 0)) {
7015                 btrfs_err(root->fs_info, "Missing references.");
7016                 BUG();
7017         }
7018         *lookup_info = 0;
7019
7020         if (wc->stage == DROP_REFERENCE) {
7021                 if (wc->refs[level - 1] > 1) {
7022                         if (level == 1 &&
7023                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7024                                 goto skip;
7025
7026                         if (!wc->update_ref ||
7027                             generation <= root->root_key.offset)
7028                                 goto skip;
7029
7030                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7031                                               path->slots[level]);
7032                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7033                         if (ret < 0)
7034                                 goto skip;
7035
7036                         wc->stage = UPDATE_BACKREF;
7037                         wc->shared_level = level - 1;
7038                 }
7039         } else {
7040                 if (level == 1 &&
7041                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7042                         goto skip;
7043         }
7044
7045         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7046                 btrfs_tree_unlock(next);
7047                 free_extent_buffer(next);
7048                 next = NULL;
7049                 *lookup_info = 1;
7050         }
7051
7052         if (!next) {
7053                 if (reada && level == 1)
7054                         reada_walk_down(trans, root, wc, path);
7055                 next = read_tree_block(root, bytenr, blocksize, generation);
7056                 if (!next || !extent_buffer_uptodate(next)) {
7057                         free_extent_buffer(next);
7058                         return -EIO;
7059                 }
7060                 btrfs_tree_lock(next);
7061                 btrfs_set_lock_blocking(next);
7062         }
7063
7064         level--;
7065         BUG_ON(level != btrfs_header_level(next));
7066         path->nodes[level] = next;
7067         path->slots[level] = 0;
7068         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7069         wc->level = level;
7070         if (wc->level == 1)
7071                 wc->reada_slot = 0;
7072         return 0;
7073 skip:
7074         wc->refs[level - 1] = 0;
7075         wc->flags[level - 1] = 0;
7076         if (wc->stage == DROP_REFERENCE) {
7077                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7078                         parent = path->nodes[level]->start;
7079                 } else {
7080                         BUG_ON(root->root_key.objectid !=
7081                                btrfs_header_owner(path->nodes[level]));
7082                         parent = 0;
7083                 }
7084
7085                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7086                                 root->root_key.objectid, level - 1, 0, 0);
7087                 BUG_ON(ret); /* -ENOMEM */
7088         }
7089         btrfs_tree_unlock(next);
7090         free_extent_buffer(next);
7091         *lookup_info = 1;
7092         return 1;
7093 }
7094
7095 /*
7096  * helper to process tree block while walking up the tree.
7097  *
7098  * when wc->stage == DROP_REFERENCE, this function drops
7099  * reference count on the block.
7100  *
7101  * when wc->stage == UPDATE_BACKREF, this function changes
7102  * wc->stage back to DROP_REFERENCE once the walk returns to the
7103  * level where the switch to UPDATE_BACKREF was made.
7104  *
7105  * NOTE: return value 1 means we should stop walking up.
7106  */
7107 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7108                                  struct btrfs_root *root,
7109                                  struct btrfs_path *path,
7110                                  struct walk_control *wc)
7111 {
7112         int ret;
7113         int level = wc->level;
7114         struct extent_buffer *eb = path->nodes[level];
7115         u64 parent = 0;
7116
7117         if (wc->stage == UPDATE_BACKREF) {
7118                 BUG_ON(wc->shared_level < level);
7119                 if (level < wc->shared_level)
7120                         goto out;
7121
7122                 ret = find_next_key(path, level + 1, &wc->update_progress);
7123                 if (ret > 0)
7124                         wc->update_ref = 0;
7125
7126                 wc->stage = DROP_REFERENCE;
7127                 wc->shared_level = -1;
7128                 path->slots[level] = 0;
7129
7130                 /*
7131                  * check the reference count again if the block isn't
7132                  * locked. we should start walking down the tree again if
7133                  * the reference count is one.
7134                  */
7135                 if (!path->locks[level]) {
7136                         BUG_ON(level == 0);
7137                         btrfs_tree_lock(eb);
7138                         btrfs_set_lock_blocking(eb);
7139                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7140
7141                         ret = btrfs_lookup_extent_info(trans, root,
7142                                                        eb->start, level, 1,
7143                                                        &wc->refs[level],
7144                                                        &wc->flags[level]);
7145                         if (ret < 0) {
7146                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7147                                 path->locks[level] = 0;
7148                                 return ret;
7149                         }
7150                         BUG_ON(wc->refs[level] == 0);
7151                         if (wc->refs[level] == 1) {
7152                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7153                                 path->locks[level] = 0;
7154                                 return 1;
7155                         }
7156                 }
7157         }
7158
7159         /* wc->stage == DROP_REFERENCE */
7160         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7161
7162         if (wc->refs[level] == 1) {
7163                 if (level == 0) {
7164                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7165                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7166                                                     wc->for_reloc);
7167                         else
7168                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7169                                                     wc->for_reloc);
7170                         BUG_ON(ret); /* -ENOMEM */
7171                 }
7172                 /* make block locked assertion in clean_tree_block happy */
7173                 if (!path->locks[level] &&
7174                     btrfs_header_generation(eb) == trans->transid) {
7175                         btrfs_tree_lock(eb);
7176                         btrfs_set_lock_blocking(eb);
7177                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7178                 }
7179                 clean_tree_block(trans, root, eb);
7180         }
7181
7182         if (eb == root->node) {
7183                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7184                         parent = eb->start;
7185                 else
7186                         BUG_ON(root->root_key.objectid !=
7187                                btrfs_header_owner(eb));
7188         } else {
7189                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7190                         parent = path->nodes[level + 1]->start;
7191                 else
7192                         BUG_ON(root->root_key.objectid !=
7193                                btrfs_header_owner(path->nodes[level + 1]));
7194         }
7195
7196         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7197 out:
7198         wc->refs[level] = 0;
7199         wc->flags[level] = 0;
7200         return 0;
7201 }
7202
7203 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7204                                    struct btrfs_root *root,
7205                                    struct btrfs_path *path,
7206                                    struct walk_control *wc)
7207 {
7208         int level = wc->level;
7209         int lookup_info = 1;
7210         int ret;
7211
7212         while (level >= 0) {
7213                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7214                 if (ret > 0)
7215                         break;
7216
7217                 if (level == 0)
7218                         break;
7219
7220                 if (path->slots[level] >=
7221                     btrfs_header_nritems(path->nodes[level]))
7222                         break;
7223
7224                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7225                 if (ret > 0) {
7226                         path->slots[level]++;
7227                         continue;
7228                 } else if (ret < 0)
7229                         return ret;
7230                 level = wc->level;
7231         }
7232         return 0;
7233 }
7234
7235 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7236                                  struct btrfs_root *root,
7237                                  struct btrfs_path *path,
7238                                  struct walk_control *wc, int max_level)
7239 {
7240         int level = wc->level;
7241         int ret;
7242
7243         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7244         while (level < max_level && path->nodes[level]) {
7245                 wc->level = level;
7246                 if (path->slots[level] + 1 <
7247                     btrfs_header_nritems(path->nodes[level])) {
7248                         path->slots[level]++;
7249                         return 0;
7250                 } else {
7251                         ret = walk_up_proc(trans, root, path, wc);
7252                         if (ret > 0)
7253                                 return 0;
7254
7255                         if (path->locks[level]) {
7256                                 btrfs_tree_unlock_rw(path->nodes[level],
7257                                                      path->locks[level]);
7258                                 path->locks[level] = 0;
7259                         }
7260                         free_extent_buffer(path->nodes[level]);
7261                         path->nodes[level] = NULL;
7262                         level++;
7263                 }
7264         }
7265         return 1;
7266 }
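
/*
 * walk_down_tree() and walk_up_tree() are meant to be driven as a pair,
 * as btrfs_drop_snapshot() below does.  A trimmed sketch of the pattern
 * (illustrative only):
 *
 *	while (1) {
 *		ret = walk_down_tree(trans, root, path, wc);
 *		if (ret < 0)
 *			break;
 *		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
 *		if (ret != 0)
 *			break;
 *	}
 *
 * walk_up_tree() returning 1 means the root itself has been processed
 * and the walk is complete.
 */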
7267
7268 /*
7269  * drop a subvolume tree.
7270  *
7271  * this function traverses the tree freeing any blocks that are only
7272  * referenced by the tree.
7273  *
7274  * when a shared tree block is found, this function decreases its
7275  * reference count by one. if update_ref is true, this function
7276  * also makes sure backrefs for the shared block and all lower level
7277  * blocks are properly updated.
7278  *
7279  * If called with for_reloc == 0, may exit early with -EAGAIN
7280  */
7281 int btrfs_drop_snapshot(struct btrfs_root *root,
7282                          struct btrfs_block_rsv *block_rsv, int update_ref,
7283                          int for_reloc)
7284 {
7285         struct btrfs_path *path;
7286         struct btrfs_trans_handle *trans;
7287         struct btrfs_root *tree_root = root->fs_info->tree_root;
7288         struct btrfs_root_item *root_item = &root->root_item;
7289         struct walk_control *wc;
7290         struct btrfs_key key;
7291         int err = 0;
7292         int ret;
7293         int level;
7294
7295         path = btrfs_alloc_path();
7296         if (!path) {
7297                 err = -ENOMEM;
7298                 goto out;
7299         }
7300
7301         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7302         if (!wc) {
7303                 btrfs_free_path(path);
7304                 err = -ENOMEM;
7305                 goto out;
7306         }
7307
7308         trans = btrfs_start_transaction(tree_root, 0);
7309         if (IS_ERR(trans)) {
7310                 err = PTR_ERR(trans);
7311                 goto out_free;
7312         }
7313
7314         if (block_rsv)
7315                 trans->block_rsv = block_rsv;
7316
7317         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7318                 level = btrfs_header_level(root->node);
7319                 path->nodes[level] = btrfs_lock_root_node(root);
7320                 btrfs_set_lock_blocking(path->nodes[level]);
7321                 path->slots[level] = 0;
7322                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7323                 memset(&wc->update_progress, 0,
7324                        sizeof(wc->update_progress));
7325         } else {
7326                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7327                 memcpy(&wc->update_progress, &key,
7328                        sizeof(wc->update_progress));
7329
7330                 level = root_item->drop_level;
7331                 BUG_ON(level == 0);
7332                 path->lowest_level = level;
7333                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7334                 path->lowest_level = 0;
7335                 if (ret < 0) {
7336                         err = ret;
7337                         goto out_end_trans;
7338                 }
7339                 WARN_ON(ret > 0);
7340
7341                 /*
7342                  * unlock our path, this is safe because only this
7343                  * function is allowed to delete this snapshot
7344                  */
7345                 btrfs_unlock_up_safe(path, 0);
7346
7347                 level = btrfs_header_level(root->node);
7348                 while (1) {
7349                         btrfs_tree_lock(path->nodes[level]);
7350                         btrfs_set_lock_blocking(path->nodes[level]);
7351
7352                         ret = btrfs_lookup_extent_info(trans, root,
7353                                                 path->nodes[level]->start,
7354                                                 level, 1, &wc->refs[level],
7355                                                 &wc->flags[level]);
7356                         if (ret < 0) {
7357                                 err = ret;
7358                                 goto out_end_trans;
7359                         }
7360                         BUG_ON(wc->refs[level] == 0);
7361
7362                         if (level == root_item->drop_level)
7363                                 break;
7364
7365                         btrfs_tree_unlock(path->nodes[level]);
7366                         WARN_ON(wc->refs[level] != 1);
7367                         level--;
7368                 }
7369         }
7370
7371         wc->level = level;
7372         wc->shared_level = -1;
7373         wc->stage = DROP_REFERENCE;
7374         wc->update_ref = update_ref;
7375         wc->keep_locks = 0;
7376         wc->for_reloc = for_reloc;
7377         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7378
7379         while (1) {
7380                 if (!for_reloc && btrfs_fs_closing(root->fs_info)) {
7381                         pr_debug("btrfs: drop snapshot early exit\n");
7382                         err = -EAGAIN;
7383                         goto out_end_trans;
7384                 }
7385
7386                 ret = walk_down_tree(trans, root, path, wc);
7387                 if (ret < 0) {
7388                         err = ret;
7389                         break;
7390                 }
7391
7392                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7393                 if (ret < 0) {
7394                         err = ret;
7395                         break;
7396                 }
7397
7398                 if (ret > 0) {
7399                         BUG_ON(wc->stage != DROP_REFERENCE);
7400                         break;
7401                 }
7402
7403                 if (wc->stage == DROP_REFERENCE) {
7404                         level = wc->level;
7405                         btrfs_node_key(path->nodes[level],
7406                                        &root_item->drop_progress,
7407                                        path->slots[level]);
7408                         root_item->drop_level = level;
7409                 }
7410
7411                 BUG_ON(wc->level == 0);
7412                 if (btrfs_should_end_transaction(trans, tree_root)) {
7413                         ret = btrfs_update_root(trans, tree_root,
7414                                                 &root->root_key,
7415                                                 root_item);
7416                         if (ret) {
7417                                 btrfs_abort_transaction(trans, tree_root, ret);
7418                                 err = ret;
7419                                 goto out_end_trans;
7420                         }
7421
7422                         btrfs_end_transaction_throttle(trans, tree_root);
7423                         trans = btrfs_start_transaction(tree_root, 0);
7424                         if (IS_ERR(trans)) {
7425                                 err = PTR_ERR(trans);
7426                                 goto out_free;
7427                         }
7428                         if (block_rsv)
7429                                 trans->block_rsv = block_rsv;
7430                 }
7431         }
7432         btrfs_release_path(path);
7433         if (err)
7434                 goto out_end_trans;
7435
7436         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7437         if (ret) {
7438                 btrfs_abort_transaction(trans, tree_root, ret);
7439                 goto out_end_trans;
7440         }
7441
7442         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7443                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7444                                            NULL, NULL);
7445                 if (ret < 0) {
7446                         btrfs_abort_transaction(trans, tree_root, ret);
7447                         err = ret;
7448                         goto out_end_trans;
7449                 } else if (ret > 0) {
7450                         /* if we fail to delete the orphan item this time
7451                          * around, it'll get picked up the next time.
7452                          *
7453                          * The most common failure here is just -ENOENT.
7454                          */
7455                         btrfs_del_orphan_item(trans, tree_root,
7456                                               root->root_key.objectid);
7457                 }
7458         }
7459
7460         if (root->in_radix) {
7461                 btrfs_free_fs_root(tree_root->fs_info, root);
7462         } else {
7463                 free_extent_buffer(root->node);
7464                 free_extent_buffer(root->commit_root);
7465                 kfree(root);
7466         }
7467 out_end_trans:
7468         btrfs_end_transaction_throttle(trans, tree_root);
7469 out_free:
7470         kfree(wc);
7471         btrfs_free_path(path);
7472 out:
7473         if (err)
7474                 btrfs_std_error(root->fs_info, err);
7475         return err;
7476 }
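
/*
 * A sketch of the expected caller, modeled on the snapshot cleaner in
 * transaction.c (the exact code lives outside this file and may differ):
 *
 *	if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
 *		btrfs_drop_snapshot(root, NULL, 0, 0);
 *	else
 *		btrfs_drop_snapshot(root, NULL, 1, 0);
 *
 * i.e. update_ref is only set for roots using mixed backrefs, where
 * shared blocks may still carry backrefs that need updating as the
 * snapshot goes away.
 */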
7477
7478 /*
7479  * drop subtree rooted at tree block 'node'.
7480  *
7481  * NOTE: this function will unlock and release tree block 'node'.
7482  * it is only used by relocation code.
7483  */
7484 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7485                         struct btrfs_root *root,
7486                         struct extent_buffer *node,
7487                         struct extent_buffer *parent)
7488 {
7489         struct btrfs_path *path;
7490         struct walk_control *wc;
7491         int level;
7492         int parent_level;
7493         int ret = 0;
7494         int wret;
7495
7496         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7497
7498         path = btrfs_alloc_path();
7499         if (!path)
7500                 return -ENOMEM;
7501
7502         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7503         if (!wc) {
7504                 btrfs_free_path(path);
7505                 return -ENOMEM;
7506         }
7507
7508         btrfs_assert_tree_locked(parent);
7509         parent_level = btrfs_header_level(parent);
7510         extent_buffer_get(parent);
7511         path->nodes[parent_level] = parent;
7512         path->slots[parent_level] = btrfs_header_nritems(parent);
7513
7514         btrfs_assert_tree_locked(node);
7515         level = btrfs_header_level(node);
7516         path->nodes[level] = node;
7517         path->slots[level] = 0;
7518         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7519
7520         wc->refs[parent_level] = 1;
7521         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7522         wc->level = level;
7523         wc->shared_level = -1;
7524         wc->stage = DROP_REFERENCE;
7525         wc->update_ref = 0;
7526         wc->keep_locks = 1;
7527         wc->for_reloc = 1;
7528         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7529
7530         while (1) {
7531                 wret = walk_down_tree(trans, root, path, wc);
7532                 if (wret < 0) {
7533                         ret = wret;
7534                         break;
7535                 }
7536
7537                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7538                 if (wret < 0)
7539                         ret = wret;
7540                 if (wret != 0)
7541                         break;
7542         }
7543
7544         kfree(wc);
7545         btrfs_free_path(path);
7546         return ret;
7547 }
7548
7549 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7550 {
7551         u64 num_devices;
7552         u64 stripped;
7553
7554         /*
7555          * if restripe for this chunk_type is on, pick the target profile
7556          * and return; otherwise do the usual balance
7557          */
7558         stripped = get_restripe_target(root->fs_info, flags);
7559         if (stripped)
7560                 return extended_to_chunk(stripped);
7561
7562         /*
7563          * we add in the count of missing devices because we want
7564          * to make sure that any RAID levels on a degraded FS
7565          * continue to be honored.
7566          */
7567         num_devices = root->fs_info->fs_devices->rw_devices +
7568                 root->fs_info->fs_devices->missing_devices;
7569
7570         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7571                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7572                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7573
7574         if (num_devices == 1) {
7575                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7576                 stripped = flags & ~stripped;
7577
7578                 /* turn raid0 into single device chunks */
7579                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7580                         return stripped;
7581
7582                 /* turn mirroring into duplication */
7583                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7584                              BTRFS_BLOCK_GROUP_RAID10))
7585                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7586         } else {
7587                 /* they already had raid on here, just return */
7588                 if (flags & stripped)
7589                         return flags;
7590
7591                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7592                 stripped = flags & ~stripped;
7593
7594                 /* switch duplicated blocks with raid1 */
7595                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7596                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7597
7598                 /* this is drive concat, leave it alone */
7599         }
7600
7601         return flags;
7602 }
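
/*
 * A worked example of the mapping above, assuming no restripe target is
 * set.  With a single usable device (rw + missing):
 *
 *	RAID1/RAID10 -> DUP	(mirroring becomes duplication)
 *	RAID0	     -> single	(nothing left to stripe across)
 *
 * with two or more devices, DUP is switched to RAID1, and chunks that
 * already have a raid profile keep their flags unchanged.
 */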
7603
7604 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7605 {
7606         struct btrfs_space_info *sinfo = cache->space_info;
7607         u64 num_bytes;
7608         u64 min_allocable_bytes;
7609         int ret = -ENOSPC;
7610
7611
7612         /*
7613          * We need some metadata space and system metadata space for
7614          * allocating chunks in some corner cases, so keep a minimum of
7615          * allocable space unless we are forced to set it read-only.
7616          */
7617         if ((sinfo->flags &
7618              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7619             !force)
7620                 min_allocable_bytes = 1 * 1024 * 1024;
7621         else
7622                 min_allocable_bytes = 0;
7623
7624         spin_lock(&sinfo->lock);
7625         spin_lock(&cache->lock);
7626
7627         if (cache->ro) {
7628                 ret = 0;
7629                 goto out;
7630         }
7631
7632         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7633                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7634
7635         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7636             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7637             min_allocable_bytes <= sinfo->total_bytes) {
7638                 sinfo->bytes_readonly += num_bytes;
7639                 cache->ro = 1;
7640                 ret = 0;
7641         }
7642 out:
7643         spin_unlock(&cache->lock);
7644         spin_unlock(&sinfo->lock);
7645         return ret;
7646 }
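
/*
 * The check above asks whether the rest of the space info can absorb
 * this group's slack.  For example (numbers illustrative): a 1GiB
 * metadata group with 600MiB used has num_bytes of roughly 400MiB, and
 * it only goes read-only if used + reserved + pinned + may_use +
 * readonly + num_bytes plus the 1MiB headroom still fits within the
 * space info's total_bytes.
 */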
7647
7648 int btrfs_set_block_group_ro(struct btrfs_root *root,
7649                              struct btrfs_block_group_cache *cache)
7651 {
7652         struct btrfs_trans_handle *trans;
7653         u64 alloc_flags;
7654         int ret;
7655
7656         BUG_ON(cache->ro);
7657
7658         trans = btrfs_join_transaction(root);
7659         if (IS_ERR(trans))
7660                 return PTR_ERR(trans);
7661
7662         alloc_flags = update_block_group_flags(root, cache->flags);
7663         if (alloc_flags != cache->flags) {
7664                 ret = do_chunk_alloc(trans, root, alloc_flags,
7665                                      CHUNK_ALLOC_FORCE);
7666                 if (ret < 0)
7667                         goto out;
7668         }
7669
7670         ret = set_block_group_ro(cache, 0);
7671         if (!ret)
7672                 goto out;
7673         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7674         ret = do_chunk_alloc(trans, root, alloc_flags,
7675                              CHUNK_ALLOC_FORCE);
7676         if (ret < 0)
7677                 goto out;
7678         ret = set_block_group_ro(cache, 0);
7679 out:
7680         btrfs_end_transaction(trans, root);
7681         return ret;
7682 }
7683
7684 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7685                             struct btrfs_root *root, u64 type)
7686 {
7687         u64 alloc_flags = get_alloc_profile(root, type);
7688         return do_chunk_alloc(trans, root, alloc_flags,
7689                               CHUNK_ALLOC_FORCE);
7690 }
7691
7692 /*
7693  * helper to account the unused space of all the readonly block groups in the
7694  * list. takes mirrors into account.
7695  */
7696 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7697 {
7698         struct btrfs_block_group_cache *block_group;
7699         u64 free_bytes = 0;
7700         int factor;
7701
7702         list_for_each_entry(block_group, groups_list, list) {
7703                 spin_lock(&block_group->lock);
7704
7705                 if (!block_group->ro) {
7706                         spin_unlock(&block_group->lock);
7707                         continue;
7708                 }
7709
7710                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7711                                           BTRFS_BLOCK_GROUP_RAID10 |
7712                                           BTRFS_BLOCK_GROUP_DUP))
7713                         factor = 2;
7714                 else
7715                         factor = 1;
7716
7717                 free_bytes += (block_group->key.offset -
7718                                btrfs_block_group_used(&block_group->item)) *
7719                                factor;
7720
7721                 spin_unlock(&block_group->lock);
7722         }
7723
7724         return free_bytes;
7725 }
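
/*
 * The factor accounts for mirroring.  For example, a 1GiB RAID1 block
 * group with 256MiB used contributes (1GiB - 256MiB) * 2 bytes of
 * reclaimable raw space, since every logical byte occupies two physical
 * copies.
 */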
7726
7727 /*
7728  * helper to account the unused space of all the readonly block groups in the
7729  * space_info. takes mirrors into account.
7730  */
7731 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7732 {
7733         int i;
7734         u64 free_bytes = 0;
7735
7736         spin_lock(&sinfo->lock);
7737
7738         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7739                 if (!list_empty(&sinfo->block_groups[i]))
7740                         free_bytes += __btrfs_get_ro_block_group_free_space(
7741                                                 &sinfo->block_groups[i]);
7742
7743         spin_unlock(&sinfo->lock);
7744
7745         return free_bytes;
7746 }
7747
7748 void btrfs_set_block_group_rw(struct btrfs_root *root,
7749                               struct btrfs_block_group_cache *cache)
7750 {
7751         struct btrfs_space_info *sinfo = cache->space_info;
7752         u64 num_bytes;
7753
7754         BUG_ON(!cache->ro);
7755
7756         spin_lock(&sinfo->lock);
7757         spin_lock(&cache->lock);
7758         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7759                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7760         sinfo->bytes_readonly -= num_bytes;
7761         cache->ro = 0;
7762         spin_unlock(&cache->lock);
7763         spin_unlock(&sinfo->lock);
7764 }
7765
7766 /*
7767  * checks to see if it's even possible to relocate this block group.
7768  *
7769  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7770  * ok to go ahead and try.
7771  */
7772 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7773 {
7774         struct btrfs_block_group_cache *block_group;
7775         struct btrfs_space_info *space_info;
7776         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7777         struct btrfs_device *device;
7778         u64 min_free;
7779         u64 dev_min = 1;
7780         u64 dev_nr = 0;
7781         u64 target;
7782         int index;
7783         int full = 0;
7784         int ret = 0;
7785
7786         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7787
7788         /* odd, couldn't find the block group, leave it alone */
7789         if (!block_group)
7790                 return -1;
7791
7792         min_free = btrfs_block_group_used(&block_group->item);
7793
7794         /* no bytes used, we're good */
7795         if (!min_free)
7796                 goto out;
7797
7798         space_info = block_group->space_info;
7799         spin_lock(&space_info->lock);
7800
7801         full = space_info->full;
7802
7803         /*
7804          * if this is the last block group we have in this space, we can't
7805          * relocate it unless we're able to allocate a new chunk below.
7806          *
7807          * Otherwise, we need to make sure we have room in the space to handle
7808          * all of the extents from this block group.  If we do, we're good.
7809          */
7810         if ((space_info->total_bytes != block_group->key.offset) &&
7811             (space_info->bytes_used + space_info->bytes_reserved +
7812              space_info->bytes_pinned + space_info->bytes_readonly +
7813              min_free < space_info->total_bytes)) {
7814                 spin_unlock(&space_info->lock);
7815                 goto out;
7816         }
7817         spin_unlock(&space_info->lock);
7818
7819         /*
7820          * ok we don't have enough space, but maybe we have free space on our
7821          * devices to allocate new chunks for relocation, so loop through our
7822          * alloc devices and guess if we have enough space.  if this block
7823          * group is going to be restriped, run checks against the target
7824          * profile instead of the current one.
7825          */
7826         ret = -1;
7827
7828         /*
7829          * index:
7830          *      0: raid10
7831          *      1: raid1
7832          *      2: dup
7833          *      3: raid0
7834          *      4: single, 5: raid5, 6: raid6
7835          */
7836         target = get_restripe_target(root->fs_info, block_group->flags);
7837         if (target) {
7838                 index = __get_raid_index(extended_to_chunk(target));
7839         } else {
7840                 /*
7841                  * this is just a balance, so if we were marked as full
7842                  * we know there is no space for a new chunk
7843                  */
7844                 if (full)
7845                         goto out;
7846
7847                 index = get_block_group_index(block_group);
7848         }
7849
7850         if (index == BTRFS_RAID_RAID10) {
7851                 dev_min = 4;
7852                 /* Divide by 2 */
7853                 min_free >>= 1;
7854         } else if (index == BTRFS_RAID_RAID1) {
7855                 dev_min = 2;
7856         } else if (index == BTRFS_RAID_DUP) {
7857                 /* Multiply by 2 */
7858                 min_free <<= 1;
7859         } else if (index == BTRFS_RAID_RAID0) {
7860                 dev_min = fs_devices->rw_devices;
7861                 do_div(min_free, dev_min);
7862         }
7863
7864         mutex_lock(&root->fs_info->chunk_mutex);
7865         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7866                 u64 dev_offset;
7867
7868                 /*
7869                  * check to make sure we can actually find a chunk with enough
7870                  * space to fit our block group in.
7871                  */
7872                 if (device->total_bytes > device->bytes_used + min_free &&
7873                     !device->is_tgtdev_for_dev_replace) {
7874                         ret = find_free_dev_extent(device, min_free,
7875                                                    &dev_offset, NULL);
7876                         if (!ret)
7877                                 dev_nr++;
7878
7879                         if (dev_nr >= dev_min)
7880                                 break;
7881
7882                         ret = -1;
7883                 }
7884         }
7885         mutex_unlock(&root->fs_info->chunk_mutex);
7886 out:
7887         btrfs_put_block_group(block_group);
7888         return ret;
7889 }
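
/*
 * The per-profile adjustments above translate the group's used bytes
 * into raw per-device requirements.  For example, relocating a 1GiB
 * RAID10 group needs about 512MiB free on each of at least 4 devices;
 * a 1GiB DUP group needs 2GiB on one device; for RAID0, min_free is
 * spread evenly across all rw devices.
 */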
7890
7891 static int find_first_block_group(struct btrfs_root *root,
7892                 struct btrfs_path *path, struct btrfs_key *key)
7893 {
7894         int ret = 0;
7895         struct btrfs_key found_key;
7896         struct extent_buffer *leaf;
7897         int slot;
7898
7899         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7900         if (ret < 0)
7901                 goto out;
7902
7903         while (1) {
7904                 slot = path->slots[0];
7905                 leaf = path->nodes[0];
7906                 if (slot >= btrfs_header_nritems(leaf)) {
7907                         ret = btrfs_next_leaf(root, path);
7908                         if (ret == 0)
7909                                 continue;
7910                         if (ret < 0)
7911                                 goto out;
7912                         break;
7913                 }
7914                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7915
7916                 if (found_key.objectid >= key->objectid &&
7917                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7918                         ret = 0;
7919                         goto out;
7920                 }
7921                 path->slots[0]++;
7922         }
7923 out:
7924         return ret;
7925 }
7926
7927 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7928 {
7929         struct btrfs_block_group_cache *block_group;
7930         u64 last = 0;
7931
7932         while (1) {
7933                 struct inode *inode;
7934
7935                 block_group = btrfs_lookup_first_block_group(info, last);
7936                 while (block_group) {
7937                         spin_lock(&block_group->lock);
7938                         if (block_group->iref)
7939                                 break;
7940                         spin_unlock(&block_group->lock);
7941                         block_group = next_block_group(info->tree_root,
7942                                                        block_group);
7943                 }
7944                 if (!block_group) {
7945                         if (last == 0)
7946                                 break;
7947                         last = 0;
7948                         continue;
7949                 }
7950
7951                 inode = block_group->inode;
7952                 block_group->iref = 0;
7953                 block_group->inode = NULL;
7954                 spin_unlock(&block_group->lock);
7955                 iput(inode);
7956                 last = block_group->key.objectid + block_group->key.offset;
7957                 btrfs_put_block_group(block_group);
7958         }
7959 }
7960
7961 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7962 {
7963         struct btrfs_block_group_cache *block_group;
7964         struct btrfs_space_info *space_info;
7965         struct btrfs_caching_control *caching_ctl;
7966         struct rb_node *n;
7967
7968         down_write(&info->extent_commit_sem);
7969         while (!list_empty(&info->caching_block_groups)) {
7970                 caching_ctl = list_entry(info->caching_block_groups.next,
7971                                          struct btrfs_caching_control, list);
7972                 list_del(&caching_ctl->list);
7973                 put_caching_control(caching_ctl);
7974         }
7975         up_write(&info->extent_commit_sem);
7976
7977         spin_lock(&info->block_group_cache_lock);
7978         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7979                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7980                                        cache_node);
7981                 rb_erase(&block_group->cache_node,
7982                          &info->block_group_cache_tree);
7983                 spin_unlock(&info->block_group_cache_lock);
7984
7985                 down_write(&block_group->space_info->groups_sem);
7986                 list_del(&block_group->list);
7987                 up_write(&block_group->space_info->groups_sem);
7988
7989                 if (block_group->cached == BTRFS_CACHE_STARTED)
7990                         wait_block_group_cache_done(block_group);
7991
7992                 /*
7993                  * We haven't cached this block group, which means we could
7994                  * possibly have excluded extents on this block group.
7995                  */
7996                 if (block_group->cached == BTRFS_CACHE_NO)
7997                         free_excluded_extents(info->extent_root, block_group);
7998
7999                 btrfs_remove_free_space_cache(block_group);
8000                 btrfs_put_block_group(block_group);
8001
8002                 spin_lock(&info->block_group_cache_lock);
8003         }
8004         spin_unlock(&info->block_group_cache_lock);
8005
8006         /* now that all the block groups are freed, go through and
8007          * free all the space_info structs.  This is only called during
8008          * the final stages of unmount, and so we know nobody is
8009          * using them.  We call synchronize_rcu() once before we start,
8010          * just to be on the safe side.
8011          */
8012         synchronize_rcu();
8013
8014         release_global_block_rsv(info);
8015
8016         while (!list_empty(&info->space_info)) {
8017                 space_info = list_entry(info->space_info.next,
8018                                         struct btrfs_space_info,
8019                                         list);
8020                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8021                         if (space_info->bytes_pinned > 0 ||
8022                             space_info->bytes_reserved > 0 ||
8023                             space_info->bytes_may_use > 0) {
8024                                 WARN_ON(1);
8025                                 dump_space_info(space_info, 0, 0);
8026                         }
8027                 }
8028                 list_del(&space_info->list);
8029                 kfree(space_info);
8030         }
8031         return 0;
8032 }
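
The loop above only holds block_group_cache_lock while detaching a node from the
rb-tree; waiting for caching threads and tearing down the free-space cache can
sleep, so the lock is dropped for that work and retaken before the next lookup.
A minimal userspace sketch of the same detach-then-drop pattern, with a pthread
mutex and a singly-linked list as hypothetical stand-ins for the spinlock and
rb-tree:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *tree;

/* Stand-in for the per-group work that may block (waiting for the
 * caching thread, freeing the free-space cache). */
static void blocking_work(struct node *n)
{
	printf("tearing down group %d\n", n->id);
}

static void free_all(void)
{
	struct node *n;

	pthread_mutex_lock(&lock);
	while ((n = tree) != NULL) {
		tree = n->next;			/* the rb_erase() step */
		pthread_mutex_unlock(&lock);	/* drop before blocking */

		blocking_work(n);
		free(n);

		pthread_mutex_lock(&lock);	/* retake for next lookup */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = tree;
		tree = n;
	}
	free_all();
	return 0;
}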
8033
8034 static void __link_block_group(struct btrfs_space_info *space_info,
8035                                struct btrfs_block_group_cache *cache)
8036 {
8037         int index = get_block_group_index(cache);
8038
8039         down_write(&space_info->groups_sem);
8040         list_add_tail(&cache->list, &space_info->block_groups[index]);
8041         up_write(&space_info->groups_sem);
8042 }
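
__link_block_group() files each block group on one of the space info's
per-RAID-profile lists, with get_block_group_index() picking the slot. Below is
an illustrative userspace sketch of how a flags word could map to that index,
assuming the btrfs_raid_types ordering used by this kernel (RAID10, RAID1, DUP,
RAID0, SINGLE, RAID5, RAID6) and the on-disk profile bits; it is not the
kernel's exact function body. The same ordering is why btrfs_read_block_groups()
below treats block_groups[3] as RAID0 and block_groups[4] as SINGLE.

#include <stdint.h>
#include <stdio.h>

/* On-disk profile bits (matching BTRFS_BLOCK_GROUP_* in ctree.h). */
#define BG_RAID0	(1ULL << 3)
#define BG_RAID1	(1ULL << 4)
#define BG_DUP		(1ULL << 5)
#define BG_RAID10	(1ULL << 6)
#define BG_RAID5	(1ULL << 7)
#define BG_RAID6	(1ULL << 8)

/* Assumed to mirror enum btrfs_raid_types: RAID10=0, RAID1=1, DUP=2,
 * RAID0=3, SINGLE=4, RAID5=5, RAID6=6. */
static int block_group_index(uint64_t flags)
{
	if (flags & BG_RAID10)
		return 0;
	if (flags & BG_RAID1)
		return 1;
	if (flags & BG_DUP)
		return 2;
	if (flags & BG_RAID0)
		return 3;
	if (flags & BG_RAID5)
		return 5;
	if (flags & BG_RAID6)
		return 6;
	return 4;	/* no profile bit set: SINGLE */
}

int main(void)
{
	printf("raid1 -> %d, single -> %d\n",
	       block_group_index(BG_RAID1), block_group_index(0));
	return 0;
}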
8043
8044 int btrfs_read_block_groups(struct btrfs_root *root)
8045 {
8046         struct btrfs_path *path;
8047         int ret;
8048         struct btrfs_block_group_cache *cache;
8049         struct btrfs_fs_info *info = root->fs_info;
8050         struct btrfs_space_info *space_info;
8051         struct btrfs_key key;
8052         struct btrfs_key found_key;
8053         struct extent_buffer *leaf;
8054         int need_clear = 0;
8055         u64 cache_gen;
8056
8057         root = info->extent_root;
8058         key.objectid = 0;
8059         key.offset = 0;
8060         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8061         path = btrfs_alloc_path();
8062         if (!path)
8063                 return -ENOMEM;
8064         path->reada = 1;
8065
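             /*
              * If the super block's cache generation doesn't match its
              * transaction generation, the free space cache was not written
              * out on the last commit and its on-disk contents are stale.
              */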
8066         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8067         if (btrfs_test_opt(root, SPACE_CACHE) &&
8068             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8069                 need_clear = 1;
8070         if (btrfs_test_opt(root, CLEAR_CACHE))
8071                 need_clear = 1;
8072
8073         while (1) {
8074                 ret = find_first_block_group(root, path, &key);
8075                 if (ret > 0)
8076                         break;
8077                 if (ret != 0)
8078                         goto error;
8079                 leaf = path->nodes[0];
8080                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8081                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8082                 if (!cache) {
8083                         ret = -ENOMEM;
8084                         goto error;
8085                 }
8086                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8087                                                 GFP_NOFS);
8088                 if (!cache->free_space_ctl) {
8089                         kfree(cache);
8090                         ret = -ENOMEM;
8091                         goto error;
8092                 }
8093
8094                 atomic_set(&cache->count, 1);
8095                 spin_lock_init(&cache->lock);
8096                 cache->fs_info = info;
8097                 INIT_LIST_HEAD(&cache->list);
8098                 INIT_LIST_HEAD(&cache->cluster_list);
8099
8100                 if (need_clear) {
8101                         /*
8102                          * When we mount with an old space cache, we need
8103                          * to set BTRFS_DC_CLEAR and set the dirty flag.
8104                          *
8105                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8106                          *    truncate the old free space cache inode and
8107                          *    set up a new one.
8108                          * b) Setting the 'dirty' flag makes sure that we
8109                          *    flush the new space cache info onto disk.
8110                          */
8111                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8112                         if (btrfs_test_opt(root, SPACE_CACHE))
8113                                 cache->dirty = 1;
8114                 }
8115
8116                 read_extent_buffer(leaf, &cache->item,
8117                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8118                                    sizeof(cache->item));
8119                 memcpy(&cache->key, &found_key, sizeof(found_key));
8120
8121                 key.objectid = found_key.objectid + found_key.offset;
8122                 btrfs_release_path(path);
8123                 cache->flags = btrfs_block_group_flags(&cache->item);
8124                 cache->sectorsize = root->sectorsize;
8125                 cache->full_stripe_len = btrfs_full_stripe_len(root,
8126                                                &root->fs_info->mapping_tree,
8127                                                found_key.objectid);
8128                 btrfs_init_free_space_ctl(cache);
8129
8130                 /*
8131                  * We need to exclude the super stripes now so that the space
8132                  * info has super bytes accounted for, otherwise we'll think
8133                  * we have more space than we actually do.
8134                  */
8135                 ret = exclude_super_stripes(root, cache);
8136                 if (ret) {
8137                         /*
8138                          * We may have excluded something, so call this just in
8139                          * case.
8140                          */
8141                         free_excluded_extents(root, cache);
8142                         kfree(cache->free_space_ctl);
8143                         kfree(cache);
8144                         goto error;
8145                 }
8146
8147                 /*
8148                  * Check for two cases: either we are full, and therefore
8149                  * don't need to bother with the caching work since we won't
8150                  * find any space, or we are empty, and we can just add all
8151                  * the space in and be done with it.  This saves us a lot of
8152                  * time, particularly in the full case.
8153                  */
8154                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8155                         cache->last_byte_to_unpin = (u64)-1;
8156                         cache->cached = BTRFS_CACHE_FINISHED;
8157                         free_excluded_extents(root, cache);
8158                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8159                         cache->last_byte_to_unpin = (u64)-1;
8160                         cache->cached = BTRFS_CACHE_FINISHED;
8161                         add_new_free_space(cache, root->fs_info,
8162                                            found_key.objectid,
8163                                            found_key.objectid +
8164                                            found_key.offset);
8165                         free_excluded_extents(root, cache);
8166                 }
8167
8168                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8169                 if (ret) {
8170                         btrfs_remove_free_space_cache(cache);
8171                         btrfs_put_block_group(cache);
8172                         goto error;
8173                 }
8174
8175                 ret = update_space_info(info, cache->flags, found_key.offset,
8176                                         btrfs_block_group_used(&cache->item),
8177                                         &space_info);
8178                 if (ret) {
8179                         btrfs_remove_free_space_cache(cache);
8180                         spin_lock(&info->block_group_cache_lock);
8181                         rb_erase(&cache->cache_node,
8182                                  &info->block_group_cache_tree);
8183                         spin_unlock(&info->block_group_cache_lock);
8184                         btrfs_put_block_group(cache);
8185                         goto error;
8186                 }
8187
8188                 cache->space_info = space_info;
8189                 spin_lock(&cache->space_info->lock);
8190                 cache->space_info->bytes_readonly += cache->bytes_super;
8191                 spin_unlock(&cache->space_info->lock);
8192
8193                 __link_block_group(space_info, cache);
8194
8195                 set_avail_alloc_bits(root->fs_info, cache->flags);
8196                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8197                         set_block_group_ro(cache, 1);
8198         }
8199
8200         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8201                 if (!(get_alloc_profile(root, space_info->flags) &
8202                       (BTRFS_BLOCK_GROUP_RAID10 |
8203                        BTRFS_BLOCK_GROUP_RAID1 |
8204                        BTRFS_BLOCK_GROUP_RAID5 |
8205                        BTRFS_BLOCK_GROUP_RAID6 |
8206                        BTRFS_BLOCK_GROUP_DUP)))
8207                         continue;
8208                 /*
8209                  * Avoid allocating from un-mirrored block groups if there
8210                  * are mirrored block groups.
8211                  */
8212                 list_for_each_entry(cache, &space_info->block_groups[3], list)
8213                         set_block_group_ro(cache, 1); /* [3]: BTRFS_RAID_RAID0 */
8214                 list_for_each_entry(cache, &space_info->block_groups[4], list)
8215                         set_block_group_ro(cache, 1); /* [4]: BTRFS_RAID_SINGLE */
8216         }
8217
8218         init_global_block_rsv(info);
8219         ret = 0;
8220 error:
8221         btrfs_free_path(path);
8222         return ret;
8223 }
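
The reader loop advances its search key past each item it finds: block group
items are keyed by (start, BLOCK_GROUP_ITEM, length) and never overlap, so the
next item cannot begin before objectid + offset of the current one. A
self-contained sketch of that advance rule over a hypothetical sorted item
table:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical block group items: objectid = start, offset = length. */
struct bg_item {
	uint64_t objectid;
	uint64_t offset;
};

static const struct bg_item items[] = {
	{ 0,           4ULL << 20 },		/* system */
	{ 4ULL << 20,  8ULL << 20 },		/* metadata */
	{ 12ULL << 20, 1024ULL << 20 },		/* data */
};

/* Stand-in for find_first_block_group(): first item whose objectid is
 * >= the search key; returns the index, or -1 when past the end. */
static int find_first(uint64_t key)
{
	for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++)
		if (items[i].objectid >= key)
			return (int)i;
	return -1;
}

int main(void)
{
	uint64_t key = 0;
	int i;

	/* Same advance rule as the loop above: the next item cannot
	 * start before current start + current length. */
	while ((i = find_first(key)) >= 0) {
		printf("group at %llu, len %llu\n",
		       (unsigned long long)items[i].objectid,
		       (unsigned long long)items[i].offset);
		key = items[i].objectid + items[i].offset;
	}
	return 0;
}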
8224
8225 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8226                                        struct btrfs_root *root)
8227 {
8228         struct btrfs_block_group_cache *block_group, *tmp;
8229         struct btrfs_root *extent_root = root->fs_info->extent_root;
8230         struct btrfs_block_group_item item;
8231         struct btrfs_key key;
8232         int ret = 0;
8233
8234         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8235                                  new_bg_list) {
8236                 list_del_init(&block_group->new_bg_list);
8237
8238                 if (ret)
8239                         continue;
8240
8241                 spin_lock(&block_group->lock);
8242                 memcpy(&item, &block_group->item, sizeof(item));
8243                 memcpy(&key, &block_group->key, sizeof(key));
8244                 spin_unlock(&block_group->lock);
8245
8246                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8247                                         sizeof(item));
8248                 if (ret)
8249                         btrfs_abort_transaction(trans, extent_root, ret);
8250         }
8251 }
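
Note the error shape in btrfs_create_pending_block_groups(): after the first
failed insert (which aborts the transaction), the loop keeps running only to
unlink the remaining entries from trans->new_bgs, with `if (ret) continue;`
skipping the real work. A tiny sketch of that drain-after-first-error pattern,
using a hypothetical do_insert():

#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/* Hypothetical insert that fails on the second entry (-5 ~ -EIO). */
static int do_insert(struct node *n)
{
	return n->id == 2 ? -5 : 0;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *n;
	int ret = 0;

	for (n = &a; n; n = n->next) {
		/* list_del_init() would go here: every entry is always
		 * unlinked, even after a failure... */
		if (ret)
			continue;	/* ...but only drained, no work */
		ret = do_insert(n);
		if (ret)
			printf("insert %d failed: %d\n", n->id, ret);
	}
	return ret ? 1 : 0;
}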
8252
8253 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8254                            struct btrfs_root *root, u64 bytes_used,
8255                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8256                            u64 size)
8257 {
8258         int ret;
8259         struct btrfs_root *extent_root;
8260         struct btrfs_block_group_cache *cache;
8261
8262         extent_root = root->fs_info->extent_root;
8263
8264         root->fs_info->last_trans_log_full_commit = trans->transid;
8265
8266         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8267         if (!cache)
8268                 return -ENOMEM;
8269         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8270                                         GFP_NOFS);
8271         if (!cache->free_space_ctl) {
8272                 kfree(cache);
8273                 return -ENOMEM;
8274         }
8275
8276         cache->key.objectid = chunk_offset;
8277         cache->key.offset = size;
8278         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8279         cache->sectorsize = root->sectorsize;
8280         cache->fs_info = root->fs_info;
8281         cache->full_stripe_len = btrfs_full_stripe_len(root,
8282                                                &root->fs_info->mapping_tree,
8283                                                chunk_offset);
8284
8285         atomic_set(&cache->count, 1);
8286         spin_lock_init(&cache->lock);
8287         INIT_LIST_HEAD(&cache->list);
8288         INIT_LIST_HEAD(&cache->cluster_list);
8289         INIT_LIST_HEAD(&cache->new_bg_list);
8290
8291         btrfs_init_free_space_ctl(cache);
8292
8293         btrfs_set_block_group_used(&cache->item, bytes_used);
8294         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8295         cache->flags = type;
8296         btrfs_set_block_group_flags(&cache->item, type);
8297
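             /*
              * A freshly allocated chunk has nothing on disk yet, so there is
              * nothing for the caching code to read back: mark the block
              * group fully cached up front.
              */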
8298         cache->last_byte_to_unpin = (u64)-1;
8299         cache->cached = BTRFS_CACHE_FINISHED;
8300         ret = exclude_super_stripes(root, cache);
8301         if (ret) {
8302                 /*
8303                  * We may have excluded something, so call this just in
8304                  * case.
8305                  */
8306                 free_excluded_extents(root, cache);
8307                 kfree(cache->free_space_ctl);
8308                 kfree(cache);
8309                 return ret;
8310         }
8311
8312         add_new_free_space(cache, root->fs_info, chunk_offset,
8313                            chunk_offset + size);
8314
8315         free_excluded_extents(root, cache);
8316
8317         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8318         if (ret) {
8319                 btrfs_remove_free_space_cache(cache);
8320                 btrfs_put_block_group(cache);
8321                 return ret;
8322         }
8323
8324         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8325                                 &cache->space_info);
8326         if (ret) {
8327                 btrfs_remove_free_space_cache(cache);
8328                 spin_lock(&root->fs_info->block_group_cache_lock);
8329                 rb_erase(&cache->cache_node,
8330                          &root->fs_info->block_group_cache_tree);
8331                 spin_unlock(&root->fs_info->block_group_cache_lock);
8332                 btrfs_put_block_group(cache);
8333                 return ret;
8334         }
8335         update_global_block_rsv(root->fs_info);
8336
8337         spin_lock(&cache->space_info->lock);
8338         cache->space_info->bytes_readonly += cache->bytes_super;
8339         spin_unlock(&cache->space_info->lock);
8340
8341         __link_block_group(cache->space_info, cache);
8342
8343         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8344
8345         set_avail_alloc_bits(extent_root->fs_info, type);
8346
8347         return 0;
8348 }
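
btrfs_make_block_group() hands the whole [chunk_offset, chunk_offset + size)
range to the free-space ctl, minus whatever exclude_super_stripes() recorded as
excluded. A userspace sketch of that subtraction, with a hypothetical sorted
excluded-range table standing in for the excluded extent state:

#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;
	uint64_t end;
};

/* Hypothetical excluded ranges (sorted, non-overlapping). */
static const struct range excluded[] = {
	{ 64ULL << 10, 128ULL << 10 },	/* e.g. a superblock mirror */
};

static void add_free(uint64_t start, uint64_t end)
{
	printf("free space: [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

/* Hand [start, end) to the free-space ctl, skipping excluded spans. */
static void add_new_free_space(uint64_t start, uint64_t end)
{
	for (size_t i = 0; i < sizeof(excluded) / sizeof(excluded[0]); i++) {
		if (excluded[i].end <= start || excluded[i].start >= end)
			continue;
		if (excluded[i].start > start)
			add_free(start, excluded[i].start);
		start = excluded[i].end;
	}
	if (start < end)
		add_free(start, end);
}

int main(void)
{
	add_new_free_space(0, 256ULL << 10);
	return 0;
}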
8349
8350 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8351 {
8352         u64 extra_flags = chunk_to_extended(flags) &
8353                                 BTRFS_EXTENDED_PROFILE_MASK;
8354
8355         write_seqlock(&fs_info->profiles_lock);
8356         if (flags & BTRFS_BLOCK_GROUP_DATA)
8357                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8358         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8359                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8360         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8361                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8362         write_sequnlock(&fs_info->profiles_lock);
8363 }
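
clear_avail_alloc_bits() is the write side of a seqlock: bump the sequence to
odd, update the avail_*_alloc_bits words, bump back to even so lockless readers
retry instead of seeing a torn update. A single-threaded userspace sketch of
the protocol with C11 atomics (it deliberately glosses over the memory-ordering
details that the kernel's seqlock_t handles):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;
static uint64_t avail_bits;

/* Write side: sequence is odd while the update is in flight. */
static void clear_bits(uint64_t clear)
{
	atomic_fetch_add(&seq, 1);	/* -> odd */
	avail_bits &= ~clear;
	atomic_fetch_add(&seq, 1);	/* -> even */
}

/* Read side: retry if a write raced with (or spans) the read. */
static uint64_t read_bits(void)
{
	unsigned int start;
	uint64_t v;

	do {
		start = atomic_load(&seq);
		v = avail_bits;
	} while ((start & 1) || atomic_load(&seq) != start);
	return v;
}

int main(void)
{
	avail_bits = 0xff;
	clear_bits(0x0f);
	printf("avail bits now 0x%llx\n", (unsigned long long)read_bits());
	return 0;
}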
8364
8365 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8366                              struct btrfs_root *root, u64 group_start)
8367 {
8368         struct btrfs_path *path;
8369         struct btrfs_block_group_cache *block_group;
8370         struct btrfs_free_cluster *cluster;
8371         struct btrfs_root *tree_root = root->fs_info->tree_root;
8372         struct btrfs_key key;
8373         struct inode *inode;
8374         int ret;
8375         int index;
8376         int factor;
8377
8378         root = root->fs_info->extent_root;
8379
8380         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8381         BUG_ON(!block_group);
8382         BUG_ON(!block_group->ro);
8383
8384         /*
8385          * Free the reserved super bytes from this block group before
8386          * removing it.
8387          */
8388         free_excluded_extents(root, block_group);
8389
8390         memcpy(&key, &block_group->key, sizeof(key));
8391         index = get_block_group_index(block_group);
8392         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8393                                   BTRFS_BLOCK_GROUP_RAID1 |
8394                                   BTRFS_BLOCK_GROUP_RAID10))
8395                 factor = 2;
8396         else
8397                 factor = 1;
8398
8399         /* make sure this block group isn't part of an allocation cluster */
8400         cluster = &root->fs_info->data_alloc_cluster;
8401         spin_lock(&cluster->refill_lock);
8402         btrfs_return_cluster_to_free_space(block_group, cluster);
8403         spin_unlock(&cluster->refill_lock);
8404
8405         /*
8406          * make sure this block group isn't part of a metadata
8407          * allocation cluster
8408          */
8409         cluster = &root->fs_info->meta_alloc_cluster;
8410         spin_lock(&cluster->refill_lock);
8411         btrfs_return_cluster_to_free_space(block_group, cluster);
8412         spin_unlock(&cluster->refill_lock);
8413
8414         path = btrfs_alloc_path();
8415         if (!path) {
8416                 ret = -ENOMEM;
8417                 goto out;
8418         }
8419
8420         inode = lookup_free_space_inode(tree_root, block_group, path);
8421         if (!IS_ERR(inode)) {
8422                 ret = btrfs_orphan_add(trans, inode);
8423                 if (ret) {
8424                         btrfs_add_delayed_iput(inode);
8425                         goto out;
8426                 }
8427                 clear_nlink(inode);
8428                 /* One for the block group's ref */
8429                 spin_lock(&block_group->lock);
8430                 if (block_group->iref) {
8431                         block_group->iref = 0;
8432                         block_group->inode = NULL;
8433                         spin_unlock(&block_group->lock);
8434                         iput(inode);
8435                 } else {
8436                         spin_unlock(&block_group->lock);
8437                 }
8438                 /* One for our lookup ref */
8439                 btrfs_add_delayed_iput(inode);
8440         }
8441
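             /*
              * Delete the free space cache's inode item from the tree root;
              * it is keyed by (BTRFS_FREE_SPACE_OBJECTID, 0, <group start>).
              */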
8442         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8443         key.offset = block_group->key.objectid;
8444         key.type = 0;
8445
8446         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8447         if (ret < 0)
8448                 goto out;
8449         if (ret > 0)
8450                 btrfs_release_path(path);
8451         if (ret == 0) {
8452                 ret = btrfs_del_item(trans, tree_root, path);
8453                 if (ret)
8454                         goto out;
8455                 btrfs_release_path(path);
8456         }
8457
8458         spin_lock(&root->fs_info->block_group_cache_lock);
8459         rb_erase(&block_group->cache_node,
8460                  &root->fs_info->block_group_cache_tree);
8461
8462         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8463                 root->fs_info->first_logical_byte = (u64)-1;
8464         spin_unlock(&root->fs_info->block_group_cache_lock);
8465
8466         down_write(&block_group->space_info->groups_sem);
8467         /*
8468          * We must use list_del_init() so others can check whether the
8469          * block group is still on the list after taking the semaphore.
8470          */
8471         list_del_init(&block_group->list);
8472         if (list_empty(&block_group->space_info->block_groups[index]))
8473                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8474         up_write(&block_group->space_info->groups_sem);
8475
8476         if (block_group->cached == BTRFS_CACHE_STARTED)
8477                 wait_block_group_cache_done(block_group);
8478
8479         btrfs_remove_free_space_cache(block_group);
8480
8481         spin_lock(&block_group->space_info->lock);
8482         block_group->space_info->total_bytes -= block_group->key.offset;
8483         block_group->space_info->bytes_readonly -= block_group->key.offset;
8484         block_group->space_info->disk_total -= block_group->key.offset * factor;
8485         spin_unlock(&block_group->space_info->lock);
8486
8487         memcpy(&key, &block_group->key, sizeof(key));
8488
8489         btrfs_clear_space_info_full(root->fs_info);
8490
8491         btrfs_put_block_group(block_group); /* drop our lookup ref */
8492         btrfs_put_block_group(block_group); /* drop the cache tree's ref */
8493
8494         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8495         if (ret > 0)
8496                 ret = -EIO;
8497         if (ret < 0)
8498                 goto out;
8499
8500         ret = btrfs_del_item(trans, root, path);
8501 out:
8502         btrfs_free_path(path);
8503         return ret;
8504 }
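
The two back-to-back btrfs_put_block_group() calls above are deliberate: one
balances the btrfs_lookup_block_group() at the top of the function, the other
drops the reference the now-erased cache tree node held. A minimal refcount
sketch of that pairing (a hypothetical model, not the kernel's struct):

#include <stdio.h>

struct bg {
	int refs;
};

static void bg_get(struct bg *b)
{
	b->refs++;
}

static void bg_put(struct bg *b)
{
	if (--b->refs == 0)
		printf("block group freed\n");
}

int main(void)
{
	struct bg b = { .refs = 1 };	/* ref held by the cache tree */

	bg_get(&b);	/* btrfs_lookup_block_group() takes a ref */
	/* ... rb_erase(): the tree no longer points at b ... */
	bg_put(&b);	/* drop the lookup ref */
	bg_put(&b);	/* drop the tree's ref: the last put frees */
	return 0;
}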
8505
8506 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8507 {
8508         struct btrfs_space_info *space_info;
8509         struct btrfs_super_block *disk_super;
8510         u64 features;
8511         u64 flags;
8512         int mixed = 0;
8513         int ret;
8514
8515         disk_super = fs_info->super_copy;
8516         if (!btrfs_super_root(disk_super))
8517                 return 1;
8518
8519         features = btrfs_super_incompat_flags(disk_super);
8520         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8521                 mixed = 1;
8522
8523         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8524         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8525         if (ret)
8526                 goto out;
8527
8528         if (mixed) {
8529                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8530                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8531         } else {
8532                 flags = BTRFS_BLOCK_GROUP_METADATA;
8533                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8534                 if (ret)
8535                         goto out;
8536
8537                 flags = BTRFS_BLOCK_GROUP_DATA;
8538                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8539         }
8540 out:
8541         return ret;
8542 }
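
btrfs_init_space_info() pre-creates the space infos an otherwise empty
filesystem needs: SYSTEM always, then either one combined METADATA|DATA info
(when mixed block groups are enabled) or two separate ones. A small sketch of
which flag combinations get registered, assuming the standard on-disk type bits
(DATA = 1<<0, SYSTEM = 1<<1, METADATA = 1<<2):

#include <stdint.h>
#include <stdio.h>

#define BG_DATA		(1ULL << 0)
#define BG_SYSTEM	(1ULL << 1)
#define BG_METADATA	(1ULL << 2)

int main(void)
{
	int mixed = 1;	/* BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS set? */

	/* SYSTEM always gets its own space info. */
	printf("space info 0x%llx\n", (unsigned long long)BG_SYSTEM);

	if (mixed) {
		/* data and metadata share one space info */
		printf("space info 0x%llx\n",
		       (unsigned long long)(BG_METADATA | BG_DATA));
	} else {
		printf("space info 0x%llx\n",
		       (unsigned long long)BG_METADATA);
		printf("space info 0x%llx\n",
		       (unsigned long long)BG_DATA);
	}
	return 0;
}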
8543
8544 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8545 {
8546         return unpin_extent_range(root, start, end);
8547 }
8548
8549 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8550                                u64 num_bytes, u64 *actual_bytes)
8551 {
8552         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8553 }
8554
8555 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8556 {
8557         struct btrfs_fs_info *fs_info = root->fs_info;
8558         struct btrfs_block_group_cache *cache = NULL;
8559         u64 group_trimmed;
8560         u64 start;
8561         u64 end;
8562         u64 trimmed = 0;
8563         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8564         int ret = 0;
8565
8566         /*
8567          * Try to trim all FS space; the first block group may start at a non-zero offset.
8568          */
8569         if (range->len == total_bytes)
8570                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8571         else
8572                 cache = btrfs_lookup_block_group(fs_info, range->start);
8573
8574         while (cache) {
8575                 if (cache->key.objectid >= (range->start + range->len)) {
8576                         btrfs_put_block_group(cache);
8577                         break;
8578                 }
8579
8580                 start = max(range->start, cache->key.objectid);
8581                 end = min(range->start + range->len,
8582                                 cache->key.objectid + cache->key.offset);
8583
8584                 if (end - start >= range->minlen) {
8585                         if (!block_group_cache_done(cache)) {
8586                                 ret = cache_block_group(cache, 0);
8587                                 if (!ret)
8588                                         wait_block_group_cache_done(cache);
8589                         }
8590                         ret = btrfs_trim_block_group(cache,
8591                                                      &group_trimmed,
8592                                                      start,
8593                                                      end,
8594                                                      range->minlen);
8595
8596                         trimmed += group_trimmed;
8597                         if (ret) {
8598                                 btrfs_put_block_group(cache);
8599                                 break;
8600                         }
8601                 }
8602
8603                 cache = next_block_group(fs_info->tree_root, cache);
8604         }
8605
8606         range->len = trimmed;
8607         return ret;
8608 }
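
btrfs_trim_fs() is the backend of the FITRIM ioctl; the kernel rewrites
range->len with the number of bytes actually discarded. A userspace caller
looks roughly like this (the mount point path is only an example):

#include <fcntl.h>
#include <limits.h>
#include <linux/fs.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range;
	int fd = open("/mnt/btrfs", O_RDONLY);	/* example mount point */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.start = 0;
	range.len = ULLONG_MAX;		/* whole filesystem */
	range.minlen = 0;		/* trim free extents of any size */

	if (ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		close(fd);
		return 1;
	}
	/* the kernel rewrote range.len with the bytes trimmed */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}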