mm, swap: avoid redundant swap device pinning
Currently, __read_swap_cache_async() calls get/put_swap_device() to take and
drop a swap device reference so that a concurrent swapoff cannot remove the
device.  However, several of its callers already hold a swap device reference,
e.g. do_swap_page() and shmem_swapin_folio(), from which
__read_swap_cache_async() is eventually called.  Only two callers do not yet
hold a reference, so make them take one instead, and drop the
get/put_swap_device() calls from __read_swap_cache_async().  This should
slightly reduce the overhead of swap-in during page faults.
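
For illustration, a minimal sketch (not part of the patch) of the calling
convention this change establishes: the caller of __read_swap_cache_async()
pins the swap device itself, as read_swap_cache_async() and
zswap_writeback_entry() now do in the diff below.  The helper name
example_swapin_lookup() is hypothetical.

/*
 * Illustrative sketch only, not part of the patch: any caller of
 * __read_swap_cache_async() is now expected to hold a swap device
 * reference across the call, so a concurrent swapoff cannot free the
 * device underneath it.
 */
static struct folio *example_swapin_lookup(swp_entry_t entry, gfp_t gfp_mask,
                                           struct mempolicy *mpol, pgoff_t ilx)
{
        struct swap_info_struct *si;
        struct folio *folio;
        bool page_allocated;

        si = get_swap_device(entry);    /* take a reference; NULL if swapoff raced */
        if (!si)
                return NULL;

        folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                        &page_allocated, false);

        put_swap_device(si);            /* drop the reference once the lookup is done */
        return folio;
}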

Link: https://lkml.kernel.org/r/20250313165935.63303-4-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <v-songbaohua@oppo.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Kairui Song authored and Andrew Morton committed Mar 17, 2025
1 parent 3123fb0 commit 78524b0
Showing 2 changed files with 15 additions and 7 deletions.
14 changes: 8 additions & 6 deletions mm/swap_state.c
@@ -426,17 +426,13 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
 		bool skip_if_exists)
 {
-	struct swap_info_struct *si;
+	struct swap_info_struct *si = swp_swap_info(entry);
 	struct folio *folio;
 	struct folio *new_folio = NULL;
 	struct folio *result = NULL;
 	void *shadow = NULL;
 
 	*new_page_allocated = false;
-	si = get_swap_device(entry);
-	if (!si)
-		return NULL;
-
 	for (;;) {
 		int err;
 		/*
@@ -532,7 +528,6 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	put_swap_folio(new_folio, entry);
 	folio_unlock(new_folio);
 put_and_return:
-	put_swap_device(si);
 	if (!(*new_page_allocated) && new_folio)
 		folio_put(new_folio);
 	return result;
@@ -552,18 +547,25 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		struct vm_area_struct *vma, unsigned long addr,
 		struct swap_iocb **plug)
 {
+	struct swap_info_struct *si;
 	bool page_allocated;
 	struct mempolicy *mpol;
 	pgoff_t ilx;
 	struct folio *folio;
 
+	si = get_swap_device(entry);
+	if (!si)
+		return NULL;
+
 	mpol = get_vma_policy(vma, addr, 0, &ilx);
 	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 					&page_allocated, false);
 	mpol_cond_put(mpol);
 
 	if (page_allocated)
 		swap_read_folio(folio, plug);
+
+	put_swap_device(si);
 	return folio;
 }

8 changes: 7 additions & 1 deletion mm/zswap.c
@@ -1051,14 +1051,20 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	struct folio *folio;
 	struct mempolicy *mpol;
 	bool folio_was_allocated;
+	struct swap_info_struct *si;
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 	};
 
 	/* try to allocate swap cache folio */
+	si = get_swap_device(swpentry);
+	if (!si)
+		return -EEXIST;
+
 	mpol = get_task_policy(current);
 	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
-			NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+	put_swap_device(si);
 	if (!folio)
 		return -ENOMEM;
