Skip to content

Commit

Permalink
Btrfs: return an error from btrfs_wait_ordered_range
Browse files Browse the repository at this point in the history
I noticed that if the free space cache has an error writing out its data it
won't actually error out, it will just carry on.  This is because it doesn't
check the return value of btrfs_wait_ordered_range, which didn't actually return
anything.  So fix this in order to keep us from making free space cache look
valid when it really isn't.  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
  • Loading branch information
Josef Bacik authored and Chris Mason committed Nov 12, 2013
1 parent ed25909 commit 0ef8b72
Show file tree
Hide file tree
Showing 6 changed files with 78 additions and 41 deletions.
66 changes: 39 additions & 27 deletions fs/btrfs/file.c
Original file line number Diff line number Diff line change
Expand Up @@ -1280,6 +1280,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
}
wait_on_page_writeback(pages[i]);
}
faili = num_pages - 1;
err = 0;
if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered;
Expand All @@ -1298,8 +1299,10 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
unlock_page(pages[i]);
page_cache_release(pages[i]);
}
btrfs_wait_ordered_range(inode, start_pos,
last_pos - start_pos);
err = btrfs_wait_ordered_range(inode, start_pos,
last_pos - start_pos);
if (err)
goto fail;
goto again;
}
if (ordered)
Expand Down Expand Up @@ -1808,8 +1811,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
atomic_inc(&root->log_batch);
full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
if (full_sync)
btrfs_wait_ordered_range(inode, start, end - start + 1);
if (full_sync) {
ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
if (ret) {
mutex_unlock(&inode->i_mutex);
goto out;
}
}
atomic_inc(&root->log_batch);

/*
Expand Down Expand Up @@ -1875,27 +1883,20 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
mutex_unlock(&inode->i_mutex);

if (ret != BTRFS_NO_LOG_SYNC) {
if (ret > 0) {
/*
* If we didn't already wait for ordered extents we need
* to do that now.
*/
if (!full_sync)
btrfs_wait_ordered_range(inode, start,
end - start + 1);
ret = btrfs_commit_transaction(trans, root);
} else {
if (!ret) {
ret = btrfs_sync_log(trans, root);
if (ret == 0) {
if (!ret) {
ret = btrfs_end_transaction(trans, root);
} else {
if (!full_sync)
btrfs_wait_ordered_range(inode, start,
end -
start + 1);
ret = btrfs_commit_transaction(trans, root);
goto out;
}
}
if (!full_sync) {
ret = btrfs_wait_ordered_range(inode, start,
end - start + 1);
if (ret)
goto out;
}
ret = btrfs_commit_transaction(trans, root);
} else {
ret = btrfs_end_transaction(trans, root);
}
Expand Down Expand Up @@ -2066,7 +2067,9 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
((offset + len - 1) >> PAGE_CACHE_SHIFT));

btrfs_wait_ordered_range(inode, offset, len);
ret = btrfs_wait_ordered_range(inode, offset, len);
if (ret)
return ret;

mutex_lock(&inode->i_mutex);
/*
Expand Down Expand Up @@ -2135,8 +2138,12 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
btrfs_put_ordered_extent(ordered);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
lockend, &cached_state, GFP_NOFS);
btrfs_wait_ordered_range(inode, lockstart,
lockend - lockstart + 1);
ret = btrfs_wait_ordered_range(inode, lockstart,
lockend - lockstart + 1);
if (ret) {
mutex_unlock(&inode->i_mutex);
return ret;
}
}

path = btrfs_alloc_path();
Expand Down Expand Up @@ -2307,7 +2314,10 @@ static long btrfs_fallocate(struct file *file, int mode,
* wait for ordered IO before we have any locks. We'll loop again
* below with the locks held.
*/
btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
ret = btrfs_wait_ordered_range(inode, alloc_start,
alloc_end - alloc_start);
if (ret)
goto out;

locked_end = alloc_end - 1;
while (1) {
Expand All @@ -2331,8 +2341,10 @@ static long btrfs_fallocate(struct file *file, int mode,
* we can't wait on the range with the transaction
* running or with the extent lock held
*/
btrfs_wait_ordered_range(inode, alloc_start,
alloc_end - alloc_start);
ret = btrfs_wait_ordered_range(inode, alloc_start,
alloc_end - alloc_start);
if (ret)
goto out;
} else {
if (ordered)
btrfs_put_ordered_extent(ordered);
Expand Down
9 changes: 7 additions & 2 deletions fs/btrfs/free-space-cache.c
Original file line number Diff line number Diff line change
Expand Up @@ -1008,8 +1008,13 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
if (ret)
goto out;


btrfs_wait_ordered_range(inode, 0, (u64)-1);
ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
if (ret) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
GFP_NOFS);
goto out;
}

key.objectid = BTRFS_FREE_SPACE_OBJECTID;
key.offset = offset;
Expand Down
9 changes: 7 additions & 2 deletions fs/btrfs/inode.c
Original file line number Diff line number Diff line change
Expand Up @@ -7236,7 +7236,9 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
* outstanding dirty pages are on disk.
*/
count = iov_length(iov, nr_segs);
btrfs_wait_ordered_range(inode, offset, count);
ret = btrfs_wait_ordered_range(inode, offset, count);
if (ret)
return ret;

if (rw & WRITE) {
/*
Expand Down Expand Up @@ -7577,7 +7579,10 @@ static int btrfs_truncate(struct inode *inode)
u64 mask = root->sectorsize - 1;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);

btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
(u64)-1);
if (ret)
return ret;

/*
* Yes ladies and gentelment, this is indeed ugly. The fact is we have
Expand Down
26 changes: 18 additions & 8 deletions fs/btrfs/ordered-data.c
Original file line number Diff line number Diff line change
Expand Up @@ -734,8 +734,9 @@ void btrfs_start_ordered_extent(struct inode *inode,
/*
* Used to wait on ordered extents across a large range of bytes.
*/
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
int ret = 0;
u64 end;
u64 orig_end;
struct btrfs_ordered_extent *ordered;
Expand All @@ -751,8 +752,9 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
/* start IO across the range first to instantiate any delalloc
* extents
*/
filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
if (ret)
return ret;
/*
* So with compression we will find and lock a dirty page and clear the
* first one as dirty, setup an async extent, and immediately return
Expand All @@ -768,10 +770,15 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
* right and you are wrong.
*/
if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags))
filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

filemap_fdatawait_range(inode->i_mapping, start, orig_end);
&BTRFS_I(inode)->runtime_flags)) {
ret = filemap_fdatawrite_range(inode->i_mapping, start,
orig_end);
if (ret)
return ret;
}
ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
if (ret)
return ret;

end = orig_end;
while (1) {
Expand All @@ -788,11 +795,14 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
}
btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset;
if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
ret = -EIO;
btrfs_put_ordered_extent(ordered);
if (end == 0 || end == start)
if (ret || end == 0 || end == start)
break;
end--;
}
return ret;
}

/*
Expand Down
2 changes: 1 addition & 1 deletion fs/btrfs/ordered-data.h
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
u64 file_offset);
void btrfs_start_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry, int wait);
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
Expand Down
7 changes: 6 additions & 1 deletion fs/btrfs/relocation.c
Original file line number Diff line number Diff line change
Expand Up @@ -4257,7 +4257,12 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
rc->extents_found);

if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1);
ret = btrfs_wait_ordered_range(rc->data_inode, 0,
(u64)-1);
if (ret) {
err = ret;
goto out;
}
invalidate_mapping_pages(rc->data_inode->i_mapping,
0, -1);
rc->stage = UPDATE_DATA_PTRS;
Expand Down

0 comments on commit 0ef8b72

Please sign in to comment.