mm: gup: remove FOLL_SPLIT
Since commit 5a52c9d ("uprobe: use FOLL_SPLIT_PMD instead of
FOLL_SPLIT") and commit ba925fa ("s390/gmap: improve THP splitting"),
FOLL_SPLIT has had no remaining users.  Remove the dead code.

Link: https://lkml.kernel.org/r/20210330203900.9222-1-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Yang Shi authored and Linus Torvalds committed Apr 30, 2021
1 parent 1d4b016 commit 4066c11
Showing 3 changed files with 2 additions and 32 deletions.
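
Editorial context, not part of the commit: after this change, FOLL_SPLIT_PMD is the only split request GUP still honours. It splits only the PMD-level mapping in place and leaves the compound page intact, so the caller gets back an ordinary PTE-mapped page. Below is a minimal, hypothetical caller-side sketch of that pattern, loosely modelled on the uprobes conversion in commit 5a52c9d; the helper name is illustrative and the get_user_pages_remote() call follows the API as I understand it for kernels of this era.

#include <linux/errno.h>
#include <linux/mm.h>

/*
 * Hypothetical helper (illustrative only): fault in and take a reference
 * on the page backing @vaddr in @mm.  FOLL_SPLIT_PMD asks GUP to split a
 * huge PMD mapping in place, so a normal PTE-mapped page is returned; the
 * compound page itself is not split (unlike the removed FOLL_SPLIT).
 * Caller must hold mmap_read_lock(mm).  Returns 0 with *pagep set, or a
 * negative errno.
 */
static int grab_pte_mapped_page(struct mm_struct *mm, unsigned long vaddr,
                                struct page **pagep)
{
        long nr;

        nr = get_user_pages_remote(mm, vaddr, 1, FOLL_SPLIT_PMD,
                                   pagep, NULL, NULL);
        if (nr != 1)
                return nr < 0 ? nr : -EFAULT;
        return 0;
}
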
Documentation/vm/transhuge.rst (5 changes: 0 additions & 5 deletions)
@@ -53,11 +53,6 @@ prevent the page from being split by anyone.
 of handling GUP on hugetlbfs will also work fine on transparent
 hugepage backed mappings.
 
-In case you can't handle compound pages if they're returned by
-follow_page, the FOLL_SPLIT bit can be specified as a parameter to
-follow_page, so that it will split the hugepages before returning
-them.
-
 Graceful fallback
 =================
 
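
Editorial note, not part of the commit: with the paragraph above deleted, a GUP user that cannot handle compound pages has to split the THP itself after the page is returned, using the same lock_page()/split_huge_page() sequence this commit removes from follow_page_pte() further down. A hypothetical sketch, with an illustrative helper name:

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper (illustrative only): split a transparent hugepage
 * returned by follow_page()/get_user_pages() so that only base pages
 * remain.  This mirrors the sequence formerly hidden behind FOLL_SPLIT.
 * The caller must hold a reference on @page.  Returns 0 on success, or
 * the error from split_huge_page() (e.g. -EBUSY if the page has extra
 * references and cannot be split right now).
 */
static int split_returned_page(struct page *page)
{
        int ret;

        if (!PageTransCompound(page))
                return 0;

        lock_page(page);
        ret = split_huge_page(page);
        unlock_page(page);

        return ret;
}
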
include/linux/mm.h (1 change: 0 additions & 1 deletion)
@@ -2791,7 +2791,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_NOWAIT    0x20    /* if a disk transfer is needed, start the IO
                                 * and return without waiting upon it */
 #define FOLL_POPULATE  0x40    /* fault in page */
-#define FOLL_SPLIT     0x80    /* don't return transhuge pages, split them */
 #define FOLL_HWPOISON  0x100   /* check page is hwpoisoned */
 #define FOLL_NUMA      0x200   /* force NUMA hinting page fault */
 #define FOLL_MIGRATION 0x400   /* wait for page to replace migration entry */
mm/gup.c (28 changes: 2 additions & 26 deletions)
@@ -516,18 +516,6 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
                 }
         }
 
-        if (flags & FOLL_SPLIT && PageTransCompound(page)) {
-                get_page(page);
-                pte_unmap_unlock(ptep, ptl);
-                lock_page(page);
-                ret = split_huge_page(page);
-                unlock_page(page);
-                put_page(page);
-                if (ret)
-                        return ERR_PTR(ret);
-                goto retry;
-        }
-
         /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
         if (unlikely(!try_grab_page(page, flags))) {
                 page = ERR_PTR(-ENOMEM);
@@ -672,7 +660,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                 spin_unlock(ptl);
                 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
         }
-        if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
+        if (flags & FOLL_SPLIT_PMD) {
                 int ret;
                 page = pmd_page(*pmd);
                 if (is_huge_zero_page(page)) {
@@ -681,19 +669,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                         split_huge_pmd(vma, pmd, address);
                         if (pmd_trans_unstable(pmd))
                                 ret = -EBUSY;
-                } else if (flags & FOLL_SPLIT) {
-                        if (unlikely(!try_get_page(page))) {
-                                spin_unlock(ptl);
-                                return ERR_PTR(-ENOMEM);
-                        }
-                        spin_unlock(ptl);
-                        lock_page(page);
-                        ret = split_huge_page(page);
-                        unlock_page(page);
-                        put_page(page);
-                        if (pmd_none(*pmd))
-                                return no_page_table(vma, flags);
-                } else {        /* flags & FOLL_SPLIT_PMD */
+                } else {
                         spin_unlock(ptl);
                         split_huge_pmd(vma, pmd, address);
                         ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
