mm, thp: make swapin readahead under down_read of mmap_sem
Currently khugepaged performs swapin readahead under down_write.  This
patch makes swapin readahead run under down_read instead of
down_write.
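
The resulting locking order (read lock while faulting pages in from swap,
then retake the lock for writing and revalidate before collapsing, since
the mapping may have changed while the lock was dropped) can be modelled
in userspace.  The sketch below is purely illustrative and uses a
pthread rwlock as a stand-in for mmap_sem; revalidate(), swapin_pages()
and do_collapse() are hypothetical placeholders for the kernel helpers
seen in the diff, not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-ins for hugepage_vma_revalidate(), __collapse_huge_page_swapin()
 * and the actual collapse work; always "succeed" in this sketch. */
static bool revalidate(void)   { return true; }
static bool swapin_pages(void) { return true; }
static void do_collapse(void)  { puts("collapse"); }

static void collapse(void)
{
	/* Phase 1: the slow swapin readahead only needs the read lock. */
	pthread_rwlock_rdlock(&mmap_lock);
	if (!revalidate() || !swapin_pages()) {
		pthread_rwlock_unlock(&mmap_lock);
		return;
	}
	pthread_rwlock_unlock(&mmap_lock);

	/* Phase 2: exclusive work under the write lock, after revalidating,
	 * because the mapping may have changed while the lock was dropped. */
	pthread_rwlock_wrlock(&mmap_lock);
	if (revalidate())
		do_collapse();
	pthread_rwlock_unlock(&mmap_lock);
}

int main(void)
{
	collapse();
	return 0;
}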

The patch was tested with a test program that allocates 800MB of memory,
writes to it, and then sleeps.  The system was forced to swap it all out.
Afterwards, the test program touches the area by writing to it again,
skipping one page in every 20 pages of the area.
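
The test program itself is not part of the commit; the following is a
minimal reconstruction based only on the description above, so details
such as the use of an anonymous mmap, the sleep duration and the final
pause() are assumptions.

#define _DEFAULT_SOURCE
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define AREA_SIZE (800UL << 20)	/* 800MB, as in the commit message */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *area = mmap(NULL, AREA_SIZE, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return 1;

	/* Dirty the whole area so it can be swapped out. */
	memset(area, 1, AREA_SIZE);

	/* Sleep while the system is forced to swap the area out
	 * (duration is an arbitrary choice for this sketch). */
	sleep(600);

	/* Touch the area again by writing, skipping one page in every 20,
	 * so khugepaged must swap the missing pages back in to collapse. */
	for (unsigned long i = 0; i < AREA_SIZE / page; i++) {
		if (i % 20 == 0)
			continue;	/* skipped page stays swapped out */
		area[i * page] = 2;
	}

	/* Keep the mapping alive so khugepaged has time to collapse it. */
	pause();
	return 0;
}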

[akpm@linux-foundation.org: update comment to match new code]
[kirill.shutemov@linux.intel.com: passing 'vma' to hugepage_vma_revalidate() is useless]
  Link: http://lkml.kernel.org/r/20160530095058.GA53044@black.fi.intel.com
  Link: http://lkml.kernel.org/r/1466021202-61880-3-git-send-email-kirill.shutemov@linux.intel.com
Link: http://lkml.kernel.org/r/1464335964-6510-4-git-send-email-ebru.akagunduz@gmail.com
Link: http://lkml.kernel.org/r/1466021202-61880-2-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Ebru Akagunduz authored and Linus Torvalds committed Jul 26, 2016
1 parent 8a966ed commit 7269586
Showing 1 changed file with 62 additions and 29 deletions.
91 changes: 62 additions & 29 deletions mm/huge_memory.c
@@ -2373,6 +2373,34 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
return !(vma->vm_flags & VM_NO_THP);
}

/*
* If mmap_sem was temporarily dropped, revalidate the vma
* before taking mmap_sem again.
* Return 0 if it succeeds; otherwise return a non-zero
* value (scan code).
*/

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
{
struct vm_area_struct *vma;
unsigned long hstart, hend;

if (unlikely(khugepaged_test_exit(mm)))
return SCAN_ANY_PROCESS;

vma = find_vma(mm, address);
if (!vma)
return SCAN_VMA_NULL;

hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (address < hstart || address + HPAGE_PMD_SIZE > hend)
return SCAN_ADDRESS_RANGE;
if (!hugepage_vma_check(vma))
return SCAN_VMA_CHECK;
return 0;
}

/*
* Bring missing pages in from swap, to complete THP collapse.
* Only done if khugepaged_scan_pmd believes it is worthwhile.
@@ -2381,7 +2409,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
* but with mmap_sem held to protect against vma changes.
*/

static void __collapse_huge_page_swapin(struct mm_struct *mm,
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd)
{
@@ -2397,18 +2425,26 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm,
continue;
swapped_in++;
ret = do_swap_page(mm, vma, _address, pte, pmd,
FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT,
FAULT_FLAG_ALLOW_RETRY,
pteval);
/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
if (ret & VM_FAULT_RETRY) {
down_read(&mm->mmap_sem);
/* vma is no longer available, don't continue to swapin */
if (hugepage_vma_revalidate(mm, address))
return false;
}
if (ret & VM_FAULT_ERROR) {
trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0);
return;
return false;
}
/* pte is unmapped now, we need to map it */
pte = pte_offset_map(pmd, _address);
}
pte--;
pte_unmap(pte);
trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1);
return true;
}

static void collapse_huge_page(struct mm_struct *mm,
@@ -2423,7 +2459,6 @@ static void collapse_huge_page(struct mm_struct *mm,
struct page *new_page;
spinlock_t *pmd_ptl, *pte_ptl;
int isolated = 0, result = 0;
unsigned long hstart, hend;
struct mem_cgroup *memcg;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
@@ -2446,39 +2481,37 @@ static void collapse_huge_page(struct mm_struct *mm,
goto out_nolock;
}

/*
* Prevent all access to pagetables with the exception of
* gup_fast later hanlded by the ptep_clear_flush and the VM
* handled by the anon_vma lock + PG_lock.
*/
down_write(&mm->mmap_sem);
if (unlikely(khugepaged_test_exit(mm))) {
result = SCAN_ANY_PROCESS;
down_read(&mm->mmap_sem);
result = hugepage_vma_revalidate(mm, address);
if (result)
goto out;
}

vma = find_vma(mm, address);
if (!vma) {
result = SCAN_VMA_NULL;
goto out;
}
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (address < hstart || address + HPAGE_PMD_SIZE > hend) {
result = SCAN_ADDRESS_RANGE;
goto out;
}
if (!hugepage_vma_check(vma)) {
result = SCAN_VMA_CHECK;
goto out;
}
pmd = mm_find_pmd(mm, address);
if (!pmd) {
result = SCAN_PMD_NULL;
goto out;
}

__collapse_huge_page_swapin(mm, vma, address, pmd);
/*
* __collapse_huge_page_swapin always returns with mmap_sem locked.
* If it fails, release mmap_sem and jump directly out.
* Continuing to collapse causes inconsistency.
*/
if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) {
up_read(&mm->mmap_sem);
goto out;
}

up_read(&mm->mmap_sem);
/*
* Prevent all access to pagetables with the exception of
* gup_fast later handled by the ptep_clear_flush and the VM
* handled by the anon_vma lock + PG_lock.
*/
down_write(&mm->mmap_sem);
result = hugepage_vma_revalidate(mm, address);
if (result)
goto out;

anon_vma_lock_write(vma->anon_vma);

