Btrfs: free reserved space on error in a few places
author:    Josef Bacik <jbacik@fusionio.com>
           Mon, 7 Oct 2013 19:21:08 +0000 (15:21 -0400)
committer: Chris Mason <chris.mason@fusionio.com>
           Tue, 12 Nov 2013 02:56:41 +0000 (21:56 -0500)
While trying to track down a reserved space leak, I noticed a few places where
we won't properly clean up reserved space if we hit an error.  This patch fixes
those up.  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
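
The idiom applied in every hunk below, shown as a minimal sketch: once the
allocator has handed out a reserved range, every error return has to give the
reservation back, either by freeing it outright or by pinning it until the
transaction commits.  record_reserved_extent() and insert_extent_item() are
made up for illustration; btrfs_alloc_path(), btrfs_free_path() and
btrfs_free_and_pin_reserved_extent() are the helpers the extent-tree.c hunks
actually use.

    /* Minimal sketch, not code from the patch. */
    static int record_reserved_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_key *ins, u64 num_bytes)
    {
            struct btrfs_path *path;
            int ret;

            path = btrfs_alloc_path();
            if (!path) {
                    /*
                     * The extent was never recorded in the extent tree; pin
                     * it so the reserved space comes back when the
                     * transaction commits.
                     */
                    btrfs_free_and_pin_reserved_extent(root, ins->objectid,
                                                       num_bytes);
                    return -ENOMEM;
            }

            ret = insert_extent_item(trans, root, path, ins); /* hypothetical */
            if (ret)
                    btrfs_free_and_pin_reserved_extent(root, ins->objectid,
                                                       num_bytes);

            btrfs_free_path(path);
            return ret;
    }

The inode.c hunk uses btrfs_free_reserved_extent() for the same purpose; there
the reservation can be handed straight back to the free space accounting, since
nothing has been written into the preallocated extent yet.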

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 65401d7ef663c0d940368b5041f29a0a15acdff0..054b11dc8edfdd6109fc99ee35a41b166c0058a1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2234,8 +2234,12 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
 {
        int ret = 0;
 
-       if (trans->aborted)
+       if (trans->aborted) {
+               if (insert_reserved)
+                       btrfs_pin_extent(root, node->bytenr,
+                                        node->num_bytes, 1);
                return 0;
+       }
 
        if (btrfs_delayed_ref_is_head(node)) {
                struct btrfs_delayed_ref_head *head;
@@ -2411,6 +2415,14 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                                btrfs_free_delayed_extent_op(extent_op);
 
                                if (ret) {
+                                       /*
+                                        * Need to reset must_insert_reserved if
+                                        * there was an error so the abort stuff
+                                        * can cleanup the reserved space
+                                        * properly.
+                                        */
+                                       if (must_insert_reserved)
+                                               locked_ref->must_insert_reserved = 1;
                                        btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
                                        spin_lock(&delayed_refs->lock);
                                        btrfs_delayed_ref_unlock(locked_ref);
@@ -6731,13 +6743,18 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                size += sizeof(*block_info);
 
        path = btrfs_alloc_path();
-       if (!path)
+       if (!path) {
+               btrfs_free_and_pin_reserved_extent(root, ins->objectid,
+                                                  root->leafsize);
                return -ENOMEM;
+       }
 
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
                                      ins, size);
        if (ret) {
+               btrfs_free_and_pin_reserved_extent(root, ins->objectid,
+                                                  root->leafsize);
                btrfs_free_path(path);
                return ret;
        }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index bb242f2fb51e60dca08a8963a6f71602f68a74e9..bba7f1a5e9f394655d674f925ef5b789daecd986 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8498,6 +8498,8 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                                                  ins.offset, 0, 0, 0,
                                                  BTRFS_FILE_EXTENT_PREALLOC);
                if (ret) {
+                       btrfs_free_reserved_extent(root, ins.objectid,
+                                                  ins.offset);
                        btrfs_abort_transaction(trans, root, ret);
                        if (own_trans)
                                btrfs_end_transaction(trans, root);