xfs: replace kmem_alloc_large() with kvmalloc()
There is no reason for this wrapper to exist anymore. All the places
that use KM_NOFS allocation are within transaction contexts and
hence covered by memalloc_nofs_save/restore contexts. Hence we don't
need any special handling of vmalloc for large IOs anymore, and
special-casing this code isn't necessary.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Dave Chinner authored and Darrick J. Wong committed Aug 9, 2021
1 parent 98fe2c3 commit d634525
Showing 9 changed files with 21 additions and 55 deletions.
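
A quick reference for reading the hunks below: at the call sites this
commit touches, the old xfs_km_flags_t arguments map onto standard GFP
flags roughly as follows. This is a condensed summary of the conversions
in this commit only, not a general description of kmem_flags_convert().

    KM_MAYFAIL       ->  GFP_KERNEL | __GFP_RETRY_MAYFAIL
    KM_ZERO          ->  use kvzalloc() rather than kvmalloc()
    KM_NOLOCKDEP     ->  GFP_KERNEL | __GFP_NOLOCKDEP
    KM_NOFS          ->  plain GFP_KERNEL (scoped NOFS is supplied by the
                         transaction's memalloc_nofs_save/restore context)
    no flags (0)     ->  GFP_KERNEL
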
39 changes: 0 additions & 39 deletions fs/xfs/kmem.c
@@ -29,42 +29,3 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
                 congestion_wait(BLK_RW_ASYNC, HZ/50);
         } while (1);
 }
-
-
-/*
- * __vmalloc() will allocate data pages and auxiliary structures (e.g.
- * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
- * we need to tell memory reclaim that we are in such a context via
- * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
- * and potentially deadlocking.
- */
-static void *
-__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
-{
-        unsigned nofs_flag = 0;
-        void *ptr;
-        gfp_t lflags = kmem_flags_convert(flags);
-
-        if (flags & KM_NOFS)
-                nofs_flag = memalloc_nofs_save();
-
-        ptr = __vmalloc(size, lflags);
-
-        if (flags & KM_NOFS)
-                memalloc_nofs_restore(nofs_flag);
-
-        return ptr;
-}
-
-void *
-kmem_alloc_large(size_t size, xfs_km_flags_t flags)
-{
-        void *ptr;
-
-        trace_kmem_alloc_large(size, flags, _RET_IP_);
-
-        ptr = kmem_alloc(size, flags | KM_MAYFAIL);
-        if (ptr)
-                return ptr;
-        return __kmem_vmalloc(size, flags);
-}
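
The removed wrapper open-coded a "try a contiguous allocation first, fall
back to vmalloc" policy. kvmalloc() already provides that behaviour in
generic code, conceptually along the lines of the sketch below. This is a
simplified illustration only: the hypothetical kvmalloc_sketch() is not
the real mm/util.c implementation, which also limits when the fallback is
attempted.

static void *
kvmalloc_sketch(size_t size, gfp_t flags)
{
        void *p;

        /* Try a physically contiguous allocation first... */
        p = kmalloc(size, flags | __GFP_NOWARN);
        if (p)
                return p;

        /* ...and fall back to virtually contiguous memory if that fails. */
        return __vmalloc(size, flags);
}
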
1 change: 0 additions & 1 deletion fs/xfs/kmem.h
@@ -57,7 +57,6 @@ kmem_flags_convert(xfs_km_flags_t flags)
 }
 
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
-extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
 static inline void kmem_free(const void *ptr)
 {
         kvfree(ptr);
2 changes: 1 addition & 1 deletion fs/xfs/libxfs/xfs_attr_leaf.c
@@ -489,7 +489,7 @@ xfs_attr_copy_value(
         }
 
         if (!args->value) {
-                args->value = kmem_alloc_large(valuelen, KM_NOLOCKDEP);
+                args->value = kvmalloc(valuelen, GFP_KERNEL | __GFP_NOLOCKDEP);
                 if (!args->value)
                         return -ENOMEM;
         }
14 changes: 8 additions & 6 deletions fs/xfs/scrub/attr.c
@@ -25,11 +25,11 @@
  * reallocating the buffer if necessary. Buffer contents are not preserved
  * across a reallocation.
  */
-int
+static int
 xchk_setup_xattr_buf(
         struct xfs_scrub *sc,
         size_t value_size,
-        xfs_km_flags_t flags)
+        gfp_t flags)
 {
         size_t sz;
         struct xchk_xattr_buf *ab = sc->buf;
@@ -57,7 +57,7 @@ xchk_setup_xattr_buf(
          * Don't zero the buffer upon allocation to avoid runtime overhead.
          * All users must be careful never to read uninitialized contents.
          */
-        ab = kmem_alloc_large(sizeof(*ab) + sz, flags);
+        ab = kvmalloc(sizeof(*ab) + sz, flags);
         if (!ab)
                 return -ENOMEM;
 
@@ -79,7 +79,7 @@ xchk_setup_xattr(
          * without the inode lock held, which means we can sleep.
          */
         if (sc->flags & XCHK_TRY_HARDER) {
-                error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
+                error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, GFP_KERNEL);
                 if (error)
                         return error;
         }
@@ -138,7 +138,8 @@ xchk_xattr_listent(
          * doesn't work, we overload the seen_enough variable to convey
          * the error message back to the main scrub function.
          */
-        error = xchk_setup_xattr_buf(sx->sc, valuelen, KM_MAYFAIL);
+        error = xchk_setup_xattr_buf(sx->sc, valuelen,
+                        GFP_KERNEL | __GFP_RETRY_MAYFAIL);
         if (error == -ENOMEM)
                 error = -EDEADLOCK;
         if (error) {
@@ -323,7 +324,8 @@ xchk_xattr_block(
                 return 0;
 
         /* Allocate memory for block usage checking. */
-        error = xchk_setup_xattr_buf(ds->sc, 0, KM_MAYFAIL);
+        error = xchk_setup_xattr_buf(ds->sc, 0,
+                        GFP_KERNEL | __GFP_RETRY_MAYFAIL);
         if (error == -ENOMEM)
                 return -EDEADLOCK;
         if (error)
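
The scrub conversions above keep the existing retry protocol: with the
inode locked, the value buffer is allocated with __GFP_RETRY_MAYFAIL and
an allocation failure is reported as -EDEADLOCK so the scrub core backs
out and reruns the scrubber with XCHK_TRY_HARDER set, at which point the
worst-case buffer is preallocated with a plain (blocking) GFP_KERNEL
allocation before the lock is taken. A condensed sketch of that flow
(illustrative only; example_scrub_attr() is hypothetical, not the actual
scrub core):

static int
example_scrub_attr(struct xfs_scrub *sc, size_t valuelen)
{
        int error;

        if (sc->flags & XCHK_TRY_HARDER) {
                /* Retry pass: block for the worst case before locking. */
                error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, GFP_KERNEL);
                if (error)
                        return error;
        }

        /* ...later, with locks held, size the buffer opportunistically... */
        error = xchk_setup_xattr_buf(sc, valuelen,
                        GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (error == -ENOMEM)
                error = -EDEADLOCK;     /* ask the core to retry harder */
        return error;
}
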
3 changes: 0 additions & 3 deletions fs/xfs/scrub/attr.h
@@ -65,7 +65,4 @@ xchk_xattr_dstmap(
                 BITS_TO_LONGS(sc->mp->m_attr_geo->blksize);
 }
 
-int xchk_setup_xattr_buf(struct xfs_scrub *sc, size_t value_size,
-                xfs_km_flags_t flags);
-
 #endif /* __XFS_SCRUB_ATTR_H__ */
4 changes: 2 additions & 2 deletions fs/xfs/xfs_log.c
@@ -1487,8 +1487,8 @@ xlog_alloc_log(
                 iclog->ic_prev = prev_iclog;
                 prev_iclog = iclog;
 
-                iclog->ic_data = kmem_alloc_large(log->l_iclog_size,
-                                KM_MAYFAIL | KM_ZERO);
+                iclog->ic_data = kvzalloc(log->l_iclog_size,
+                                GFP_KERNEL | __GFP_RETRY_MAYFAIL);
                 if (!iclog->ic_data)
                         goto out_free_iclog;
 #ifdef DEBUG
10 changes: 9 additions & 1 deletion fs/xfs/xfs_log_cil.c
@@ -185,7 +185,15 @@ xlog_cil_alloc_shadow_bufs(
                  */
                 kmem_free(lip->li_lv_shadow);
 
-                lv = kmem_alloc_large(buf_size, KM_NOFS);
+                /*
+                 * We are in transaction context, which means this
+                 * allocation will pick up GFP_NOFS from the
+                 * memalloc_nofs_save/restore context the transaction
+                 * holds. This means we can use GFP_KERNEL here so the
+                 * generic kvmalloc() code will run vmalloc on
+                 * contiguous page allocation failure as we require.
+                 */
+                lv = kvmalloc(buf_size, GFP_KERNEL);
                 memset(lv, 0, xlog_cil_iovec_space(niovecs));
 
                 lv->lv_item = lip;
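
The new comment above depends on scoped NOFS: once a task has entered a
memalloc_nofs_save() section (as XFS transactions do), every allocation
it makes is treated by reclaim as if GFP_NOFS had been passed, so the
caller can use plain GFP_KERNEL and still prevent reclaim from recursing
into the filesystem. A minimal sketch of the mechanism, with a
hypothetical helper name rather than the actual xfs_trans code:

static void *
alloc_inside_nofs_scope(size_t buf_size)
{
        unsigned int nofs_flag;
        void *lv;

        /* Entering the scope: reclaim now treats this task as GFP_NOFS. */
        nofs_flag = memalloc_nofs_save();

        /*
         * GFP_KERNEL here is effectively GFP_NOFS because of the scope
         * above; kvmalloc() still falls back to vmalloc() if a
         * contiguous allocation cannot be satisfied.
         */
        lv = kvmalloc(buf_size, GFP_KERNEL);

        memalloc_nofs_restore(nofs_flag);
        return lv;
}
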
2 changes: 1 addition & 1 deletion fs/xfs/xfs_log_recover.c
@@ -106,7 +106,7 @@ xlog_alloc_buffer(
         if (nbblks > 1 && log->l_sectBBsize > 1)
                 nbblks += log->l_sectBBsize;
         nbblks = round_up(nbblks, log->l_sectBBsize);
-        return kmem_alloc_large(BBTOB(nbblks), KM_MAYFAIL | KM_ZERO);
+        return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 }
 
 /*
1 change: 0 additions & 1 deletion fs/xfs/xfs_trace.h
@@ -3774,7 +3774,6 @@ DEFINE_EVENT(xfs_kmem_class, name, \
         TP_PROTO(ssize_t size, int flags, unsigned long caller_ip), \
         TP_ARGS(size, flags, caller_ip))
 DEFINE_KMEM_EVENT(kmem_alloc);
-DEFINE_KMEM_EVENT(kmem_alloc_large);
 
 TRACE_EVENT(xfs_check_new_dalign,
         TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino),
