mm/huge_memory: remove the old, unused __split_huge_page()
Now that split_huge_page_to_list_to_order() uses the new backend split code in
__split_unmapped_folio(), the old __split_huge_page() and
__split_huge_page_tail() can be removed.

Link: https://lkml.kernel.org/r/20250307174001.242794-6-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Kairui Song <kasong@tencent.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Zi Yan authored and Andrew Morton committed Mar 18, 2025
1 parent 58729c0 commit 1f43d5a
Showing 1 changed file with 0 additions and 215 deletions.
mm/huge_memory.c: 0 additions, 215 deletions
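The diff below removes the old per-page split loop. As a rough illustration of what that loop computed, here is a small user-space sketch (sketch_split() and the example values are hypothetical, not kernel code) of the tail-walk arithmetic from the removed __split_huge_page(): it enumerates which page offsets become heads of the after-split folios and the index each inherits, matching "new_folio->index = folio->index + tail" in __split_huge_page_tail().

#include <stdio.h>

/*
 * User-space sketch of the tail-walk in the removed __split_huge_page():
 * an order-`order` folio is split into 2^(order - new_order) folios of
 * order `new_order`.  Offset 0 remains the head; every other multiple of
 * 2^new_order becomes the head of a new folio.
 */
static void sketch_split(unsigned long folio_index, int order, int new_order)
{
	unsigned int nr = 1u << order;		/* pages in the original folio */
	unsigned int new_nr = 1u << new_order;	/* pages per after-split folio */
	int i;

	/* Same iteration order as the removed loop: highest tail first. */
	for (i = nr - new_nr; i >= (int)new_nr; i -= new_nr)
		printf("new folio at page offset %4d, index %lu\n",
		       i, folio_index + i);

	printf("head folio at page offset    0, index %lu\n", folio_index);
}

int main(void)
{
	/* Example: split an order-9 (2MB on x86-64) folio into order-2 folios. */
	sketch_split(/* folio->index */ 1024, 9, 2);
	return 0;
}

Compiled and run, this prints 127 new folios at offsets 508, 504, ..., 4 plus the head at offset 0, i.e. 128 order-2 folios out of one order-9 folio.
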
@@ -3284,221 +3284,6 @@ static void lru_add_page_tail(struct folio *folio, struct page *tail,
	}
}

static void __split_huge_page_tail(struct folio *folio, int tail,
		struct lruvec *lruvec, struct list_head *list,
		unsigned int new_order)
{
	struct page *head = &folio->page;
	struct page *page_tail = head + tail;
	/*
	 * Careful: new_folio is not a "real" folio before we cleared PageTail.
	 * Don't pass it around before clear_compound_head().
	 */
	struct folio *new_folio = (struct folio *)page_tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);

	/*
	 * Clone page flags before unfreezing refcount.
	 *
	 * After successful get_page_unless_zero() might follow flags change,
	 * for example lock_page() which set PG_waiters.
	 *
	 * Note that for mapped sub-pages of an anonymous THP,
	 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
	 * the migration entry instead from where remap_page() will restore it.
	 * We can still have PG_anon_exclusive set on effectively unmapped and
	 * unreferenced sub-pages of an anonymous THP: we can simply drop
	 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
	 */
	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_workingset) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
			 (1L << PG_arch_2) |
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
			 (1L << PG_arch_3) |
#endif
			 (1L << PG_dirty) |
			 LRU_GEN_MASK | LRU_REFS_MASK));

	/* ->mapping in first and second tail page is replaced by other uses */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	new_folio->mapping = folio->mapping;
	new_folio->index = folio->index + tail;

	/*
	 * page->private should not be set in tail pages. Fix up and warn once
	 * if private is unexpectedly set.
	 */
	if (unlikely(page_tail->private)) {
		VM_WARN_ON_ONCE_PAGE(true, page_tail);
		page_tail->private = 0;
	}
	if (folio_test_swapcache(folio))
		new_folio->swap.val = folio->swap.val + tail;

	/* Page flags must be visible before we make the page non-compound. */
	smp_wmb();

	/*
	 * Clear PageTail before unfreezing page refcount.
	 *
	 * After successful get_page_unless_zero() might follow put_page()
	 * which needs correct compound_head().
	 */
	clear_compound_head(page_tail);
	if (new_order) {
		prep_compound_page(page_tail, new_order);
		folio_set_large_rmappable(new_folio);
	}

	/* Finally unfreeze refcount. Additional reference from page cache. */
	page_ref_unfreeze(page_tail,
		1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
			     folio_nr_pages(new_folio) : 0));

	if (folio_test_young(folio))
		folio_set_young(new_folio);
	if (folio_test_idle(folio))
		folio_set_idle(new_folio);

	folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));

	/*
	 * always add to the tail because some iterators expect new
	 * pages to show after the currently processed elements - e.g.
	 * migrate_pages
	 */
	lru_add_page_tail(folio, page_tail, lruvec, list);
}

static void __split_huge_page(struct page *page, struct list_head *list,
		pgoff_t end, unsigned int new_order)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct lruvec *lruvec;
	struct address_space *swap_cache = NULL;
	unsigned long offset = 0;
	int i, nr_dropped = 0;
	unsigned int new_nr = 1 << new_order;
	int order = folio_order(folio);
	unsigned int nr = 1 << order;

	/*
	 * Reset any memcg data overlay in the tail pages. folio_nr_pages()
	 * is unreliable after this point.
	 */
#ifdef NR_PAGES_IN_LARGE_FOLIO
	folio->_nr_pages = 0;
#endif

	/* complete memcg works before add pages to LRU */
	split_page_memcg(head, order, new_order);

	if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
		offset = swap_cache_index(folio->swap);
		swap_cache = swap_address_space(folio->swap);
		xa_lock(&swap_cache->i_pages);
	}

	/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
	lruvec = folio_lruvec_lock(folio);

	folio_clear_has_hwpoisoned(folio);

	for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
		struct folio *tail;
		__split_huge_page_tail(folio, i, lruvec, list, new_order);
		tail = page_folio(head + i);
		/* Some pages can be beyond EOF: drop them from page cache */
		if (tail->index >= end) {
			if (shmem_mapping(folio->mapping))
				nr_dropped += new_nr;
			else if (folio_test_clear_dirty(tail))
				folio_account_cleaned(tail,
					inode_to_wb(folio->mapping->host));
			__filemap_remove_folio(tail, NULL);
			folio_put_refs(tail, folio_nr_pages(tail));
		} else if (!folio_test_anon(folio)) {
			__xa_store(&folio->mapping->i_pages, tail->index,
					tail, 0);
		} else if (swap_cache) {
			__xa_store(&swap_cache->i_pages, offset + i,
					tail, 0);
		}
	}

	if (!new_order)
		ClearPageCompound(head);
	else {
		struct folio *new_folio = (struct folio *)head;

		folio_set_order(new_folio, new_order);
	}
	unlock_page_lruvec(lruvec);
	/* Caller disabled irqs, so they are still disabled here */

	split_page_owner(head, order, new_order);
	pgalloc_tag_split(folio, order, new_order);

	/* See comment in __split_huge_page_tail() */
	if (folio_test_anon(folio)) {
		/* Additional pin to swap cache */
		if (folio_test_swapcache(folio)) {
			folio_ref_add(folio, 1 + new_nr);
			xa_unlock(&swap_cache->i_pages);
		} else {
			folio_ref_inc(folio);
		}
	} else {
		/* Additional pin to page cache */
		folio_ref_add(folio, 1 + new_nr);
		xa_unlock(&folio->mapping->i_pages);
	}
	local_irq_enable();

	if (nr_dropped)
		shmem_uncharge(folio->mapping->host, nr_dropped);
	remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0);

	/*
	 * set page to its compound_head when split to non order-0 pages, so
	 * we can skip unlocking it below, since PG_locked is transferred to
	 * the compound_head of the page and the caller will unlock it.
	 */
	if (new_order)
		page = compound_head(page);

	for (i = 0; i < nr; i += new_nr) {
		struct page *subpage = head + i;
		struct folio *new_folio = page_folio(subpage);
		if (subpage == page)
			continue;
		folio_unlock(new_folio);

		/*
		 * Subpages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		free_page_and_swap_cache(subpage);
	}
}

/* Racy check whether the huge page can be split */
bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
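For reference, the refcount rule encoded by the page_ref_unfreeze() call in the removed __split_huge_page_tail() above can be made concrete with a short user-space sketch; unfreeze_refs() is a hypothetical stand-in, not a kernel helper.

#include <stdbool.h>
#include <stdio.h>

/*
 * User-space sketch of the unfreeze value in the removed
 * __split_huge_page_tail(): one base reference, plus folio_nr_pages()
 * extra references when the folio sits in the page cache or swap cache
 * (the "additional reference from page cache" noted in the removed code).
 */
static unsigned int unfreeze_refs(bool anon, bool swapcache, unsigned int new_order)
{
	unsigned int nr_pages = 1u << new_order;	/* folio_nr_pages(new_folio) */

	return 1 + ((!anon || swapcache) ? nr_pages : 0);
}

int main(void)
{
	/* Anonymous, not in the swap cache: only the base reference. */
	printf("anon, order-0:             %u\n", unfreeze_refs(true, false, 0));
	/* File-backed order-2 tail: base reference + 4 page-cache references. */
	printf("file-backed, order-2:      %u\n", unfreeze_refs(false, false, 2));
	/* Anonymous folio in the swap cache, order-0: base + 1 swap-cache ref. */
	printf("anon + swapcache, order-0: %u\n", unfreeze_refs(true, true, 0));
	return 0;
}

With these inputs it prints 1, 5 and 2 respectively, matching 1 + folio_nr_pages() for the cached cases.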
