I noticed that if the free space cache hits an error writing out its data
it won't actually error out; it will just carry on. This is because it
doesn't check the return value of btrfs_wait_ordered_range, which didn't
actually return anything. So fix this in order to keep us from making the
free space cache look valid when it really isn't. Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
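To make the change easier to follow: btrfs_wait_ordered_range() previously
returned void, so callers such as the free space cache writeout had no way to
learn that the range they waited on had hit an I/O error. The diff below makes
it return an int (propagating errors from filemap_fdatawrite_range() /
filemap_fdatawait_range() and -EIO from ordered extents flagged
BTRFS_ORDERED_IOERR) and teaches every caller to check that value. The
standalone C sketch below only illustrates the general pattern; the names
wait_range(), writeback_error and cache_write() are invented for illustration
and are not btrfs code.

/*
 * Minimal sketch of the pattern this patch applies, NOT actual btrfs
 * code: a "wait" helper that used to return void now reports an error,
 * and its caller refuses to mark its result valid when the wait failed.
 * All identifiers here (wait_range, writeback_error, cache_write) are
 * hypothetical.
 */
#include <errno.h>
#include <stdio.h>

static int writeback_error;	/* stands in for BTRFS_ORDERED_IOERR */

/* Was effectively "void wait_range(void)": errors were silently lost. */
static int wait_range(void)
{
	if (writeback_error)
		return -EIO;	/* propagate the I/O error to the caller */
	return 0;
}

/* Stands in for writing out the free space cache. */
static int cache_write(void)
{
	int ret = wait_range();

	if (ret)		/* don't mark the cache valid on error */
		return ret;
	printf("cache written and marked valid\n");
	return 0;
}

int main(void)
{
	writeback_error = 1;	/* simulate a failed writeback */
	if (cache_write())
		fprintf(stderr, "cache write failed, cache left invalid\n");
	return 0;
}
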
}
wait_on_page_writeback(pages[i]);
}
err = 0;
if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered;
unlock_page(pages[i]);
page_cache_release(pages[i]);
}
- btrfs_wait_ordered_range(inode, start_pos,
- last_pos - start_pos);
+ err = btrfs_wait_ordered_range(inode, start_pos,
+ last_pos - start_pos);
+ if (err)
+ goto fail;
goto again;
}
if (ordered)
atomic_inc(&root->log_batch);
full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
- if (full_sync)
- btrfs_wait_ordered_range(inode, start, end - start + 1);
+ if (full_sync) {
+ ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
+ if (ret) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
+ }
atomic_inc(&root->log_batch);
/*
mutex_unlock(&inode->i_mutex);
if (ret != BTRFS_NO_LOG_SYNC) {
- if (ret > 0) {
- /*
- * If we didn't already wait for ordered extents we need
- * to do that now.
- */
- if (!full_sync)
- btrfs_wait_ordered_range(inode, start,
- end - start + 1);
- ret = btrfs_commit_transaction(trans, root);
- } else {
ret = btrfs_sync_log(trans, root);
ret = btrfs_end_transaction(trans, root);
- } else {
- if (!full_sync)
- btrfs_wait_ordered_range(inode, start,
- end -
- start + 1);
- ret = btrfs_commit_transaction(trans, root);
+ if (!full_sync) {
+ ret = btrfs_wait_ordered_range(inode, start,
+ end - start + 1);
+ if (ret)
+ goto out;
+ }
+ ret = btrfs_commit_transaction(trans, root);
} else {
ret = btrfs_end_transaction(trans, root);
}
bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
((offset + len - 1) >> PAGE_CACHE_SHIFT));
- btrfs_wait_ordered_range(inode, offset, len);
+ ret = btrfs_wait_ordered_range(inode, offset, len);
+ if (ret)
+ return ret;
mutex_lock(&inode->i_mutex);
/*
btrfs_put_ordered_extent(ordered);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
lockend, &cached_state, GFP_NOFS);
- btrfs_wait_ordered_range(inode, lockstart,
- lockend - lockstart + 1);
+ ret = btrfs_wait_ordered_range(inode, lockstart,
+ lockend - lockstart + 1);
+ if (ret) {
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+ }
}
path = btrfs_alloc_path();
* wait for ordered IO before we have any locks. We'll loop again
* below with the locks held.
*/
- btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
+ ret = btrfs_wait_ordered_range(inode, alloc_start,
+ alloc_end - alloc_start);
+ if (ret)
+ goto out;
locked_end = alloc_end - 1;
while (1) {
* we can't wait on the range with the transaction
* running or with the extent lock held
*/
- btrfs_wait_ordered_range(inode, alloc_start,
- alloc_end - alloc_start);
+ ret = btrfs_wait_ordered_range(inode, alloc_start,
+ alloc_end - alloc_start);
+ if (ret)
+ goto out;
} else {
if (ordered)
btrfs_put_ordered_extent(ordered);
-
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ if (ret) {
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
+ EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
+ GFP_NOFS);
+ goto out;
+ }
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
key.offset = offset;
* outstanding dirty pages are on disk.
*/
count = iov_length(iov, nr_segs);
- btrfs_wait_ordered_range(inode, offset, count);
+ ret = btrfs_wait_ordered_range(inode, offset, count);
+ if (ret)
+ return ret;
u64 mask = root->sectorsize - 1;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
- btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
+ ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
+ (u64)-1);
+ if (ret)
+ return ret;
/*
* Yes ladies and gentelment, this is indeed ugly. The fact is we have
/*
* Used to wait on ordered extents across a large range of bytes.
*/
-void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+ int ret = 0;
u64 end;
u64 orig_end;
struct btrfs_ordered_extent *ordered;
/* start IO across the range first to instantiate any delalloc
* extents
*/
- filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
+ ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+ if (ret)
+ return ret;
/*
* So with compression we will find and lock a dirty page and clear the
* first one as dirty, setup an async extent, and immediately return
* right and you are wrong.
*/
if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags))
- filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
- filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+ &BTRFS_I(inode)->runtime_flags)) {
+ ret = filemap_fdatawrite_range(inode->i_mapping, start,
+ orig_end);
+ if (ret)
+ return ret;
+ }
+ ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+ if (ret)
+ return ret;
end = orig_end;
while (1) {
}
btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset;
+ if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+ ret = -EIO;
btrfs_put_ordered_extent(ordered);
- if (end == 0 || end == start)
+ if (ret || end == 0 || end == start)
u64 file_offset);
void btrfs_start_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry, int wait);
-void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
+int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
rc->extents_found);
if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
- btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1);
+ ret = btrfs_wait_ordered_range(rc->data_inode, 0,
+ (u64)-1);
+ if (ret) {
+ err = ret;
+ goto out;
+ }
invalidate_mapping_pages(rc->data_inode->i_mapping,
0, -1);
rc->stage = UPDATE_DATA_PTRS;