Btrfs: fix error handling in make/read block group
author Josef Bacik <jbacik@fusionio.com>
Tue, 2 Apr 2013 16:40:42 +0000 (12:40 -0400)
committer Josef Bacik <jbacik@fusionio.com>
Mon, 6 May 2013 19:54:32 +0000 (15:54 -0400)
I noticed that we add a block group to the space info before we add it to
the block group cache rb tree, so we could potentially allocate from the
block group before it can be searched for.  I don't think this is too much
of a problem since the race window is microscopic, but just in case, move
the tree insertion above the space info linking.  This also makes it easier
to adjust the error handling, so we can remove a couple of BUG_ON(ret)'s and
have real error handling set up for these scenarios.  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
fs/btrfs/extent-tree.c

index 171f8d676d9cc7624ba0732085623f2e337c96e0..cba98c1bcc0c11eff0fad32a7cf5a9fc3ac7c810 100644
@@ -8175,10 +8175,26 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                        free_excluded_extents(root, cache);
                }
 
+               ret = btrfs_add_block_group_cache(root->fs_info, cache);
+               if (ret) {
+                       btrfs_remove_free_space_cache(cache);
+                       btrfs_put_block_group(cache);
+                       goto error;
+               }
+
                ret = update_space_info(info, cache->flags, found_key.offset,
                                        btrfs_block_group_used(&cache->item),
                                        &space_info);
-               BUG_ON(ret); /* -ENOMEM */
+               if (ret) {
+                       btrfs_remove_free_space_cache(cache);
+                       spin_lock(&info->block_group_cache_lock);
+                       rb_erase(&cache->cache_node,
+                                &info->block_group_cache_tree);
+                       spin_unlock(&info->block_group_cache_lock);
+                       btrfs_put_block_group(cache);
+                       goto error;
+               }
+
                cache->space_info = space_info;
                spin_lock(&cache->space_info->lock);
                cache->space_info->bytes_readonly += cache->bytes_super;
@@ -8186,9 +8202,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 
                __link_block_group(space_info, cache);
 
-               ret = btrfs_add_block_group_cache(root->fs_info, cache);
-               BUG_ON(ret); /* Logic error */
-
                set_avail_alloc_bits(root->fs_info, cache->flags);
                if (btrfs_chunk_readonly(root, cache->key.objectid))
                        set_block_group_ro(cache, 1);
@@ -8311,9 +8324,24 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
        free_excluded_extents(root, cache);
 
+       ret = btrfs_add_block_group_cache(root->fs_info, cache);
+       if (ret) {
+               btrfs_remove_free_space_cache(cache);
+               btrfs_put_block_group(cache);
+               return ret;
+       }
+
        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
-       BUG_ON(ret); /* -ENOMEM */
+       if (ret) {
+               btrfs_remove_free_space_cache(cache);
+               spin_lock(&root->fs_info->block_group_cache_lock);
+               rb_erase(&cache->cache_node,
+                        &root->fs_info->block_group_cache_tree);
+               spin_unlock(&root->fs_info->block_group_cache_lock);
+               btrfs_put_block_group(cache);
+               return ret;
+       }
        update_global_block_rsv(root->fs_info);
 
        spin_lock(&cache->space_info->lock);
@@ -8322,9 +8350,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
        __link_block_group(cache->space_info, cache);
 
-       ret = btrfs_add_block_group_cache(root->fs_info, cache);
-       BUG_ON(ret); /* Logic error */
-
        list_add_tail(&cache->new_bg_list, &trans->new_bgs);
 
        set_avail_alloc_bits(extent_root->fs_info, type);
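
For reference, below is a minimal, self-contained sketch of the ordering and
unwinding the patch establishes: insert the block group into the cache rb tree
first, then attach it to the space info, and undo the insertion instead of
calling BUG_ON() if the second step fails.  The types and helpers here are
simplified stand-ins for illustration only, not the btrfs code; in the real
error paths the free space cache is also torn down with
btrfs_remove_free_space_cache() and the rb_erase is done under
block_group_cache_lock.

/*
 * Sketch of the setup/unwind ordering:
 *  1) insert the block group into the cache rb tree so it can be found,
 *  2) then attach it to the space info,
 *  3) on failure of step 2, erase the tree entry and drop the reference.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct block_group {		/* stand-in for struct btrfs_block_group_cache */
	int in_cache_tree;	/* models membership in block_group_cache_tree */
	int refs;
};

/* Models btrfs_add_block_group_cache(): rb_insert into the cache tree. */
static int add_block_group_cache(struct block_group *bg)
{
	bg->in_cache_tree = 1;
	return 0;
}

/* Models the rb_erase done in the new error path. */
static void remove_block_group_cache(struct block_group *bg)
{
	bg->in_cache_tree = 0;
}

/* Models update_space_info(); fails with -ENOMEM when asked to. */
static int update_space_info(int simulate_enomem)
{
	return simulate_enomem ? -ENOMEM : 0;
}

/* Models btrfs_put_block_group(): drop a reference, free on the last one. */
static void put_block_group(struct block_group *bg)
{
	if (--bg->refs == 0)
		free(bg);
}

static int make_block_group(int simulate_enomem)
{
	struct block_group *bg = calloc(1, sizeof(*bg));
	int ret;

	if (!bg)
		return -ENOMEM;
	bg->refs = 1;

	/* 1) Make the group searchable in the cache tree first. */
	ret = add_block_group_cache(bg);
	if (ret) {
		put_block_group(bg);
		return ret;
	}

	/* 2) Link it to the space info; unwind step 1 instead of BUG_ON(). */
	ret = update_space_info(simulate_enomem);
	if (ret) {
		remove_block_group_cache(bg);
		put_block_group(bg);
		return ret;
	}

	put_block_group(bg);	/* toy cleanup; the real code keeps the group */
	return 0;
}

int main(void)
{
	printf("success path returns %d\n", make_block_group(0));
	printf("failure path returns %d\n", make_block_group(1));
	return 0;
}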