mm: /proc/pid/clear_refs: avoid split_huge_page()
Currently, the pagewalker splits all THP pages on any clear_refs request.
This is unnecessary: we can handle it at the PMD level.
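
For reference, the walk is driven from userspace through /proc/pid/clear_refs;
a minimal sketch of that interface (command values follow the kernel's
clear_refs_types enum, error handling mostly omitted):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

/* Sketch only: trigger the clear_refs pagewalk for a process.
 * Writing "1" clears the referenced bits; "4" (CLEAR_REFS_SOFT_DIRTY)
 * clears the soft-dirty bits. */
static void clear_refs(pid_t pid, const char *cmd)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
	fd = open(path, O_WRONLY);
	if (fd >= 0) {
		write(fd, cmd, 1);
		close(fd);
	}
}

int main(void)
{
	clear_refs(getpid(), "4");	/* reset soft-dirty tracking */
	return 0;
}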

One side effect is that soft-dirty tracking will potentially report more
dirty memory, since we now mark the whole THP dirty at once.
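
To see the new granularity from userspace, here is a sketch (not part of this
patch; it assumes 4K base pages and that madvise(MADV_HUGEPAGE) really backs
the region with a 2M THP): reset soft-dirty, write one byte, and every
pagemap entry covering the huge page reports bit 55 (soft-dirty) set:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const size_t thp = 2UL << 20, psize = 4096;	/* assumed sizes */
	uint64_t entry;
	int fd;

	/* Map 2*thp so the pointer can be aligned to a THP boundary. */
	char *map = mmap(NULL, 2 * thp, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *buf = (char *)(((uintptr_t)map + thp - 1) & ~(thp - 1));

	madvise(buf, thp, MADV_HUGEPAGE);
	buf[0] = 1;				/* fault the huge page in */

	fd = open("/proc/self/clear_refs", O_WRONLY);
	write(fd, "4", 1);			/* reset soft-dirty state */
	close(fd);

	buf[123] = 1;				/* dirty a single byte */

	/* Bit 55 of a pagemap entry is the soft-dirty flag; with this
	 * patch all 512 subpages of the THP report it set. */
	fd = open("/proc/self/pagemap", O_RDONLY);
	for (size_t i = 0; i < thp / psize; i++) {
		pread(fd, &entry, sizeof(entry),
		      ((uintptr_t)buf / psize + i) * sizeof(entry));
		printf("page %3zu soft-dirty: %d\n", i,
		       (int)((entry >> 55) & 1));
	}
	close(fd);
	return 0;
}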

Sanity-checked with the CRIU test suite; more testing is required.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Kirill A. Shutemov authored and Linus Torvalds committed Feb 12, 2015
1 parent 48684a6 commit 7d5b3bf
Showing 1 changed file with 44 additions and 3 deletions.
fs/proc/task_mmu.c

@@ -739,10 +739,10 @@ struct clear_refs_private {
 	enum clear_refs_types type;
 };
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
 static inline void clear_soft_dirty(struct vm_area_struct *vma,
 		unsigned long addr, pte_t *pte)
 {
-#ifdef CONFIG_MEM_SOFT_DIRTY
 	/*
 	 * The soft-dirty tracker uses #PF-s to catch writes
 	 * to pages, so write-protect the pte as well. See the
@@ -759,9 +759,35 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 	}
 
 	set_pte_at(vma->vm_mm, addr, pte, ptent);
-#endif
 }
 
+static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+		unsigned long addr, pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+
+	pmd = pmd_wrprotect(pmd);
+	pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
+
+	if (vma->vm_flags & VM_SOFTDIRTY)
+		vma->vm_flags &= ~VM_SOFTDIRTY;
+
+	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+}
+
+#else
+
+static inline void clear_soft_dirty(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *pte)
+{
+}
+
+static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+		unsigned long addr, pmd_t *pmdp)
+{
+}
+#endif
+
 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -771,7 +797,22 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
-	split_huge_page_pmd(vma, addr, pmd);
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
+			clear_soft_dirty_pmd(vma, addr, pmd);
+			goto out;
+		}
+
+		page = pmd_page(*pmd);
+
+		/* Clear accessed and referenced bits. */
+		pmdp_test_and_clear_young(vma, addr, pmd);
+		ClearPageReferenced(page);
+out:
+		spin_unlock(ptl);
+		return 0;
+	}
+
 	if (pmd_trans_unstable(pmd))
 		return 0;
 