xfs: use byte ranges for write cleanup ranges
xfs_buffered_write_iomap_end() currently converts the byte ranges
passed to it to filesystem blocks to pass them to the bmap code to
punch out delalloc blocks, but then has to convert filesystem
blocks back to byte ranges for page cache truncate.

We're about to make the page cache truncate go away and replace it
with a page cache walk, so having to convert everything back and forth
between byte ranges and filesystem blocks is messy and error-prone. It
is much easier to
pass around byte ranges and convert to page indexes and/or
filesystem blocks only where those units are needed.

In preparation for the page cache walk being added, add a helper
that converts byte ranges to filesystem blocks and calls
xfs_bmap_punch_delalloc_range() and convert
xfs_buffered_write_iomap_end() to calculate limits in byte ranges.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Authored and committed by Dave Chinner on Nov 23, 2022
1 parent: 198dd8a · commit: b71f889
Showing 1 changed file with 25 additions and 15 deletions.
fs/xfs/xfs_iomap.c: 25 additions & 15 deletions
@@ -1120,6 +1120,20 @@ xfs_buffered_write_iomap_begin(
 	return error;
 }
 
+static int
+xfs_buffered_write_delalloc_punch(
+	struct inode		*inode,
+	loff_t			start_byte,
+	loff_t			end_byte)
+{
+	struct xfs_mount	*mp = XFS_M(inode->i_sb);
+	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
+	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
+
+	return xfs_bmap_punch_delalloc_range(XFS_I(inode), start_fsb,
+				end_fsb - start_fsb);
+}
+
 static int
 xfs_buffered_write_iomap_end(
 	struct inode		*inode,
@@ -1129,10 +1143,9 @@ xfs_buffered_write_iomap_end(
 	unsigned		flags,
 	struct iomap		*iomap)
 {
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	xfs_fileoff_t		start_fsb;
-	xfs_fileoff_t		end_fsb;
+	struct xfs_mount	*mp = XFS_M(inode->i_sb);
+	loff_t			start_byte;
+	loff_t			end_byte;
 	int			error = 0;
 
 	if (iomap->type != IOMAP_DELALLOC)
@@ -1157,13 +1170,13 @@ xfs_buffered_write_iomap_end(
 	 * the range.
 	 */
 	if (unlikely(!written))
-		start_fsb = XFS_B_TO_FSBT(mp, offset);
+		start_byte = round_down(offset, mp->m_sb.sb_blocksize);
 	else
-		start_fsb = XFS_B_TO_FSB(mp, offset + written);
-	end_fsb = XFS_B_TO_FSB(mp, offset + length);
+		start_byte = round_up(offset + written, mp->m_sb.sb_blocksize);
+	end_byte = round_up(offset + length, mp->m_sb.sb_blocksize);
 
 	/* Nothing to do if we've written the entire delalloc extent */
-	if (start_fsb >= end_fsb)
+	if (start_byte >= end_byte)
 		return 0;
 
 	/*
@@ -1173,15 +1186,12 @@ xfs_buffered_write_iomap_end(
 	 * leave dirty pages with no space reservation in the cache.
 	 */
 	filemap_invalidate_lock(inode->i_mapping);
-	truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
-				 XFS_FSB_TO_B(mp, end_fsb) - 1);
-
-	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
-			end_fsb - start_fsb);
+	truncate_pagecache_range(inode, start_byte, end_byte - 1);
+	error = xfs_buffered_write_delalloc_punch(inode, start_byte, end_byte);
 	filemap_invalidate_unlock(inode->i_mapping);
 	if (error && !xfs_is_shutdown(mp)) {
-		xfs_alert(mp, "%s: unable to clean up ino %lld",
-			__func__, ip->i_ino);
+		xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
+			__func__, XFS_I(inode)->i_ino);
 		return error;
 	}
 	return 0;
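
To make the byte-range rounding in the hunks above easier to follow, here is a
small userspace sketch of the same calculation. It is illustrative only: the
4096-byte block size, the sample offset/length values, and the round_down_b()/
round_up_b() helpers are stand-ins for the kernel's mp->m_sb.sb_blocksize and
round_down()/round_up() macros, and are not part of this commit.

#include <stdio.h>
#include <stdint.h>

/* Illustrative block size; XFS reads the real one from mp->m_sb.sb_blocksize. */
#define BLOCKSIZE	4096ULL

/* Stand-ins for the kernel's round_down()/round_up() macros. */
static uint64_t round_down_b(uint64_t x, uint64_t align)
{
	return x - (x % align);
}

static uint64_t round_up_b(uint64_t x, uint64_t align)
{
	return round_down_b(x + align - 1, align);
}

int main(void)
{
	/*
	 * Made-up values: a buffered write at offset 5000 that mapped
	 * 20000 bytes of delalloc but wrote nothing back.
	 */
	uint64_t offset = 5000, written = 0, length = 20000;
	uint64_t start_byte, end_byte;

	if (written == 0)	/* punch from the start of the block holding offset */
		start_byte = round_down_b(offset, BLOCKSIZE);
	else			/* otherwise punch from the block after the written data */
		start_byte = round_up_b(offset + written, BLOCKSIZE);
	end_byte = round_up_b(offset + length, BLOCKSIZE);

	/* Prints: punch byte range [4096, 28672) */
	printf("punch byte range [%llu, %llu)\n",
	       (unsigned long long)start_byte,
	       (unsigned long long)end_byte);
	return 0;
}

Built with any C compiler, this prints "punch byte range [4096, 28672)", which
is the range the new byte-based code would hand to
xfs_buffered_write_delalloc_punch() for such an unwritten mapping on a
filesystem with 4 KiB blocks.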
