xfs: move xfs_inactive call to xfs_inode_mark_reclaimable

Move the xfs_inactive call and all the other debugging checks and stats
updates into xfs_inode_mark_reclaimable, because most of these are
implementation details of the inode cache.  We also move the function
to a different spot within xfs_icache.c.  Both changes are preparation
for the upcoming deferred inactivation work.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Darrick J. Wong committed Aug 6, 2021
1 parent 0ed17f0 commit c6c2066
Showing 2 changed files with 74 additions and 75 deletions.
99 changes: 74 additions & 25 deletions fs/xfs/xfs_icache.c
@@ -292,31 +292,6 @@ xfs_perag_clear_inode_tag(
trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/*
* We set the inode flag atomically with the radix tree tag.
* Once we get tag lookups on the radix tree, this inode flag
* can go away.
*/
void
xfs_inode_mark_reclaimable(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;

pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);

xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
XFS_ICI_RECLAIM_TAG);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

spin_unlock(&ip->i_flags_lock);
spin_unlock(&pag->pag_ici_lock);
xfs_perag_put(pag);
}

static inline void
xfs_inew_wait(
struct xfs_inode *ip)
@@ -1739,3 +1714,77 @@ xfs_icwalk(
return last_error;
BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
}

#ifdef DEBUG
static void
xfs_check_delalloc(
struct xfs_inode *ip,
int whichfork)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_bmbt_irec got;
struct xfs_iext_cursor icur;

if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
return;
do {
if (isnullstartblock(got.br_startblock)) {
xfs_warn(ip->i_mount,
"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
ip->i_ino,
whichfork == XFS_DATA_FORK ? "data" : "cow",
got.br_startoff, got.br_blockcount);
}
} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork) do { } while (0)
#endif

/*
* We set the inode flag atomically with the radix tree tag.
* Once we get tag lookups on the radix tree, this inode flag
* can go away.
*/
void
xfs_inode_mark_reclaimable(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;

xfs_inactive(ip);

if (!XFS_FORCED_SHUTDOWN(mp) && ip->i_delayed_blks) {
xfs_check_delalloc(ip, XFS_DATA_FORK);
xfs_check_delalloc(ip, XFS_COW_FORK);
ASSERT(0);
}

XFS_STATS_INC(mp, vn_reclaim);

/*
* We should never get here with one of the reclaim flags already set.
*/
ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

/*
* We always use background reclaim here because even if the inode is
* clean, it still may be under IO and hence we have wait for IO
* completion to occur before we can reclaim the inode. The background
* reclaim path handles this more efficiently than we can here, so
* simply let background reclaim tear down all inodes.
*/
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);

xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
XFS_ICI_RECLAIM_TAG);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

spin_unlock(&ip->i_flags_lock);
spin_unlock(&pag->pag_ici_lock);
xfs_perag_put(pag);
}
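
To make the new flow easy to follow, here is a heavily simplified, self-contained mock of the ordering that xfs_inode_mark_reclaimable follows after this patch, per the hunk above: inactivate the inode, sanity-check for leftover delalloc blocks, bump the reclaim statistics, assert that no reclaim flags are set, and finally tag the inode so background reclaim tears it down. Everything below (mock_inode, mock_inactive, and the rest) is an invented stand-in for illustration only; it is user-space C, not kernel code, and none of the names are part of the XFS API.

/*
 * Illustration only -- NOT kernel code.  Every type and helper here is a
 * stand-in so the step ordering of the new xfs_inode_mark_reclaimable()
 * can be read in isolation.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_inode {
	unsigned long long	ino;		/* stand-in for ip->i_ino */
	unsigned int		delayed_blks;	/* stand-in for ip->i_delayed_blks */
	bool			reclaimable;	/* stand-in for XFS_IRECLAIMABLE */
};

/* Stand-in for xfs_inactive(): final teardown of the inode's on-disk state. */
static void mock_inactive(struct mock_inode *ip)
{
	printf("inactivating inode 0x%llx\n", ip->ino);
}

/* Stand-in for xfs_check_delalloc(): warn about leaked delalloc extents. */
static void mock_check_delalloc(struct mock_inode *ip, const char *fork)
{
	printf("ino 0x%llx %s fork still has delalloc blocks\n", ip->ino, fork);
}

static void mock_mark_reclaimable(struct mock_inode *ip)
{
	mock_inactive(ip);			/* 1. inactivate */

	if (ip->delayed_blks) {			/* 2. debug-only delalloc checks */
		mock_check_delalloc(ip, "data");
		mock_check_delalloc(ip, "cow");
	}

	/* 3. a vn_reclaim statistics bump would happen here */

	assert(!ip->reclaimable);		/* 4. reclaim flags must be clear */

	ip->reclaimable = true;			/* 5. tag for background reclaim */
}

int main(void)
{
	struct mock_inode ip = { .ino = 0x85, .delayed_blks = 0, .reclaimable = false };

	mock_mark_reclaimable(&ip);
	return 0;
}
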
50 changes: 0 additions & 50 deletions fs/xfs/xfs_super.c
@@ -613,32 +613,6 @@ xfs_fs_alloc_inode(
return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
struct xfs_inode *ip,
int whichfork)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_bmbt_irec got;
struct xfs_iext_cursor icur;

if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
return;
do {
if (isnullstartblock(got.br_startblock)) {
xfs_warn(ip->i_mount,
"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
ip->i_ino,
whichfork == XFS_DATA_FORK ? "data" : "cow",
got.br_startoff, got.br_blockcount);
}
} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork) do { } while (0)
#endif

/*
* Now that the generic code is guaranteed not to be accessing
* the linux inode, we can inactivate and reclaim the inode.
@@ -654,30 +628,6 @@ xfs_fs_destroy_inode(
ASSERT(!rwsem_is_locked(&inode->i_rwsem));
XFS_STATS_INC(ip->i_mount, vn_rele);
XFS_STATS_INC(ip->i_mount, vn_remove);

xfs_inactive(ip);

if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
xfs_check_delalloc(ip, XFS_DATA_FORK);
xfs_check_delalloc(ip, XFS_COW_FORK);
ASSERT(0);
}

XFS_STATS_INC(ip->i_mount, vn_reclaim);

/*
* We should never get here with one of the reclaim flags already set.
*/
ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

/*
* We always use background reclaim here because even if the inode is
* clean, it still may be under IO and hence we have wait for IO
* completion to occur before we can reclaim the inode. The background
* reclaim path handles this more efficiently than we can here, so
* simply let background reclaim tear down all inodes.
*/
xfs_inode_mark_reclaimable(ip);
}
