mm: thp: consolidate vma size check to transhuge_vma_suitable
There are a couple of places that check whether the vma size is OK for THP
or whether the address fits; they are open coded and duplicated.  Use
transhuge_vma_suitable() to do the job by passing in (vma->vm_end -
HPAGE_PMD_SIZE).

Move the vma size check into hugepage_vma_check().  This makes
khugepaged_enter() the same as khugepaged_enter_vma().  There is just
one caller of khugepaged_enter(); replace it with khugepaged_enter_vma()
and remove khugepaged_enter().
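
For the size check, passing (vma->vm_end - HPAGE_PMD_SIZE) probes the highest
address whose PMD-aligned start could still fit inside the vma; if even that
candidate fails, no aligned HPAGE_PMD_SIZE range fits.  A rough worked example,
assuming a 2MB HPAGE_PMD_SIZE (the addresses are made up for illustration):

	/* vma spans [0x400000, 0x7ff000): an aligned 2MB range fits */
	addr  = 0x7ff000 - 0x200000;	/* 0x5ff000 */
	haddr = addr & HPAGE_PMD_MASK;	/* 0x400000 */
	/* haddr >= vm_start and haddr + 2MB <= vm_end -> suitable */

	/* vma spans [0x401000, 0x600000): no aligned 2MB range fits */
	addr  = 0x600000 - 0x200000;	/* 0x400000 */
	haddr = addr & HPAGE_PMD_MASK;	/* 0x400000 */
	/* haddr < vm_start -> rejected */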

Link: https://lkml.kernel.org/r/20220616174840.1202070-3-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Zach O'Keefe <zokeefe@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Yang Shi authored and akpm committed Jul 18, 2022
1 parent 66137fb commit 4fa6893
Showing 4 changed files with 18 additions and 28 deletions.
11 changes: 11 additions & 0 deletions include/linux/huge_mm.h
@@ -116,6 +116,17 @@ extern struct kobj_attribute shmem_enabled_attr;

extern unsigned long transparent_hugepage_flags;

+/*
+ * Do the below checks:
+ *   - For file vma, check if the linear page offset of vma is
+ *     HPAGE_PMD_NR aligned within the file.  The hugepage is
+ *     guaranteed to be hugepage-aligned within the file, but we must
+ *     check that the PMD-aligned addresses in the VMA map to
+ *     PMD-aligned offsets within the file, else the hugepage will
+ *     not be PMD-mappable.
+ *   - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ *     area.
+ */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
unsigned long addr)
{
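	unsigned long haddr;

	/*
	 * Sketch of the checks described in the comment above; the exact
	 * kernel implementation may differ slightly.
	 */

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	/* The aligned HPAGE_PMD_SIZE area around addr must fit in the vma */
	haddr = addr & HPAGE_PMD_MASK;
	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;

	return true;
}
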
14 changes: 0 additions & 14 deletions include/linux/khugepaged.h
@@ -51,27 +51,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
__khugepaged_exit(mm);
}

-static inline void khugepaged_enter(struct vm_area_struct *vma,
-unsigned long vm_flags)
-{
-if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
-khugepaged_enabled()) {
-if (hugepage_vma_check(vma, vm_flags))
-__khugepaged_enter(vma->vm_mm);
-}
-}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
-static inline void khugepaged_enter(struct vm_area_struct *vma,
-unsigned long vm_flags)
-{
-}
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
2 changes: 1 addition & 1 deletion mm/huge_memory.c
@@ -726,7 +726,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
return VM_FAULT_FALLBACK;
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
-khugepaged_enter(vma, vma->vm_flags);
+khugepaged_enter_vma(vma, vma->vm_flags);

if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm) &&
19 changes: 6 additions & 13 deletions mm/khugepaged.c
@@ -443,8 +443,8 @@ bool hugepage_vma_check(struct vm_area_struct *vma,
if (vma_is_dax(vma))
return false;

-if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
-vma->vm_pgoff, HPAGE_PMD_NR))
+/* Check alignment for file vma and size for both file and anon vma */
+if (!transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
return false;

/* Enabled via shmem mount options or sysfs settings. */
@@ -505,9 +505,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
-khugepaged_enabled() &&
-(((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-(vma->vm_end & HPAGE_PMD_MASK))) {
+khugepaged_enabled()) {
if (hugepage_vma_check(vma, vm_flags))
__khugepaged_enter(vma->vm_mm);
}
@@ -948,7 +946,6 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
struct vm_area_struct **vmap)
{
struct vm_area_struct *vma;
-unsigned long hstart, hend;

if (unlikely(khugepaged_test_exit(mm)))
return SCAN_ANY_PROCESS;
@@ -957,9 +954,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!vma)
return SCAN_VMA_NULL;

-hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
-hend = vma->vm_end & HPAGE_PMD_MASK;
-if (address < hstart || address + HPAGE_PMD_SIZE > hend)
+if (!transhuge_vma_suitable(vma, address))
return SCAN_ADDRESS_RANGE;
if (!hugepage_vma_check(vma, vma->vm_flags))
return SCAN_VMA_CHECK;
@@ -2135,10 +2130,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
progress++;
continue;
}
-hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
-hend = vma->vm_end & HPAGE_PMD_MASK;
-if (hstart >= hend)
-goto skip;
+hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
+hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
if (khugepaged_scan.address > hend)
goto skip;
if (khugepaged_scan.address < hstart)
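
The round_up()/round_down() pair computes the same hstart/hend as the old
open-coded mask arithmetic, and the separate hstart >= hend test becomes
redundant because hugepage_vma_check(), called earlier in the scan loop, now
performs the size check.  A quick check of the equivalence, assuming a 2MB
HPAGE_PMD_SIZE (the values are picked only for illustration):

	/* vm_start = 0x401000, vm_end = 0x7ff000 */
	hstart = (0x401000 + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;	/* 0x600000 */
	hstart = round_up(0x401000, HPAGE_PMD_SIZE);		/* 0x600000 */
	hend = 0x7ff000 & HPAGE_PMD_MASK;			/* 0x600000 */
	hend = round_down(0x7ff000, HPAGE_PMD_SIZE);		/* 0x600000 */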
