Commit

---
r: 332461
b: refs/heads/master
c: 2ec74c3
h: refs/heads/master
i:
  332459: d11a9ca
v: v3
Sagi Grimberg authored and Linus Torvalds committed Oct 9, 2012
1 parent 2b2f731 commit 8422205
Showing 8 changed files with 93 additions and 77 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 36e4f20af833d1ce196e6a4ade05dc26c44652d1
refs/heads/master: 2ec74c3ef2d8c58d71e0e00336fb6b891192155a
47 changes: 0 additions & 47 deletions trunk/include/linux/mmu_notifier.h
@@ -246,50 +246,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
__mmu_notifier_mm_destroy(mm);
}

/*
* These two macros will sometime replace ptep_clear_flush.
* ptep_clear_flush is implemented as macro itself, so this also is
* implemented as a macro until ptep_clear_flush will converted to an
* inline function, to diminish the risk of compilation failure. The
* invalidate_page method over time can be moved outside the PT lock
* and these two macros can be later removed.
*/
#define ptep_clear_flush_notify(__vma, __address, __ptep) \
({ \
pte_t __pte; \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
__pte = ptep_clear_flush(___vma, ___address, __ptep); \
mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
__pte; \
})

#define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
({ \
pmd_t __pmd; \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
VM_BUG_ON(__address & ~HPAGE_PMD_MASK); \
mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address, \
(__address)+HPAGE_PMD_SIZE);\
__pmd = pmdp_clear_flush(___vma, ___address, __pmdp); \
mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address, \
(__address)+HPAGE_PMD_SIZE); \
__pmd; \
})

#define pmdp_splitting_flush_notify(__vma, __address, __pmdp) \
({ \
struct vm_area_struct *___vma = __vma; \
unsigned long ___address = __address; \
VM_BUG_ON(__address & ~HPAGE_PMD_MASK); \
mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address, \
(__address)+HPAGE_PMD_SIZE);\
pmdp_splitting_flush(___vma, ___address, __pmdp); \
mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address, \
(__address)+HPAGE_PMD_SIZE); \
})

#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
int __young; \
@@ -380,9 +336,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_clear_flush_notify pmdp_clear_flush
#define pmdp_splitting_flush_notify pmdp_splitting_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */
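
The three helpers removed above (ptep_clear_flush_notify, pmdp_clear_flush_notify, pmdp_splitting_flush_notify) bundled the TLB flush and the mmu_notifier call into a single macro, which meant the notifier callbacks ran with the page-table lock held. The rest of this commit switches the call sites either to bracket the flush with explicit mmu_notifier_invalidate_range_start/end calls issued outside the lock, or, for single-page cases such as __xip_unmap and do_wp_page, to call mmu_notifier_invalidate_page() after the lock is dropped. Below is a minimal sketch of the range-bracketed call-site pattern; it is illustrative only, not a literal copy of any function in this diff, and it assumes a PMD-aligned haddr and CONFIG_TRANSPARENT_HUGEPAGE for HPAGE_PMD_SIZE.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/huge_mm.h>

static void sketch_clear_huge_pmd(struct vm_area_struct *vma,
				  unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long mmun_start = haddr;			/* For mmu_notifiers */
	unsigned long mmun_end = haddr + HPAGE_PMD_SIZE;	/* For mmu_notifiers */

	/* Notify secondary MMUs first; callbacks may sleep, so no PT lock held here. */
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	spin_lock(&mm->page_table_lock);
	pmdp_clear_flush(vma, haddr, pmd);	/* was pmdp_clear_flush_notify() */
	spin_unlock(&mm->page_table_lock);

	/* Matching end call, again outside the page-table lock. */
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
}
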
4 changes: 3 additions & 1 deletion trunk/mm/filemap_xip.c
@@ -192,11 +192,13 @@ __xip_unmap (struct address_space * mapping,
if (pte) {
/* Nuke the page table entry. */
flush_cache_page(vma, address, pte_pfn(*pte));
pteval = ptep_clear_flush_notify(vma, address, pte);
pteval = ptep_clear_flush(vma, address, pte);
page_remove_rmap(page);
dec_mm_counter(mm, MM_FILEPAGES);
BUG_ON(pte_dirty(pteval));
pte_unmap_unlock(pte, ptl);
/* must invalidate_page _before_ freeing the page */
mmu_notifier_invalidate_page(mm, address);
page_cache_release(page);
}
}
42 changes: 36 additions & 6 deletions trunk/mm/huge_memory.c
@@ -787,6 +787,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
pmd_t _pmd;
int ret = 0, i;
struct page **pages;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */

pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
GFP_KERNEL);
@@ -823,12 +825,16 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
cond_resched();
}

mmun_start = haddr;
mmun_end = haddr + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto out_free_pages;
VM_BUG_ON(!PageHead(page));

pmdp_clear_flush_notify(vma, haddr, pmd);
pmdp_clear_flush(vma, haddr, pmd);
/* leave pmd empty until pte is filled */

pgtable = pgtable_trans_huge_withdraw(mm);
@@ -851,6 +857,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
page_remove_rmap(page);
spin_unlock(&mm->page_table_lock);

mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

ret |= VM_FAULT_WRITE;
put_page(page);

@@ -859,6 +867,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,

out_free_pages:
spin_unlock(&mm->page_table_lock);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
mem_cgroup_uncharge_start();
for (i = 0; i < HPAGE_PMD_NR; i++) {
mem_cgroup_uncharge_page(pages[i]);
@@ -875,6 +884,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
int ret = 0;
struct page *page, *new_page;
unsigned long haddr;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */

VM_BUG_ON(!vma->anon_vma);
spin_lock(&mm->page_table_lock);
@@ -925,31 +936,39 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
__SetPageUptodate(new_page);

mmun_start = haddr;
mmun_end = haddr + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

spin_lock(&mm->page_table_lock);
put_page(page);
if (unlikely(!pmd_same(*pmd, orig_pmd))) {
spin_unlock(&mm->page_table_lock);
mem_cgroup_uncharge_page(new_page);
put_page(new_page);
goto out;
goto out_mn;
} else {
pmd_t entry;
VM_BUG_ON(!PageHead(page));
entry = mk_pmd(new_page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
pmdp_clear_flush_notify(vma, haddr, pmd);
pmdp_clear_flush(vma, haddr, pmd);
page_add_new_anon_rmap(new_page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache(vma, address, pmd);
page_remove_rmap(page);
put_page(page);
ret |= VM_FAULT_WRITE;
}
out_unlock:
spin_unlock(&mm->page_table_lock);
out_mn:
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
return ret;
out_unlock:
spin_unlock(&mm->page_table_lock);
return ret;
}

struct page *follow_trans_huge_pmd(struct mm_struct *mm,
@@ -1162,7 +1181,11 @@ static int __split_huge_page_splitting(struct page *page,
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd;
int ret = 0;
/* For mmu_notifiers */
const unsigned long mmun_start = address;
const unsigned long mmun_end = address + HPAGE_PMD_SIZE;

mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
spin_lock(&mm->page_table_lock);
pmd = page_check_address_pmd(page, mm, address,
PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
@@ -1174,10 +1197,11 @@ static int __split_huge_page_splitting(struct page *page,
* and it won't wait on the anon_vma->root->mutex to
* serialize against split_huge_page*.
*/
pmdp_splitting_flush_notify(vma, address, pmd);
pmdp_splitting_flush(vma, address, pmd);
ret = 1;
}
spin_unlock(&mm->page_table_lock);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

return ret;
}
@@ -1898,6 +1922,8 @@ static void collapse_huge_page(struct mm_struct *mm,
spinlock_t *ptl;
int isolated;
unsigned long hstart, hend;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */

VM_BUG_ON(address & ~HPAGE_PMD_MASK);

@@ -1952,15 +1978,19 @@ static void collapse_huge_page(struct mm_struct *mm,
pte = pte_offset_map(pmd, address);
ptl = pte_lockptr(mm, pmd);

mmun_start = address;
mmun_end = address + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
spin_lock(&mm->page_table_lock); /* probably unnecessary */
/*
* After this gup_fast can't run anymore. This also removes
* any huge TLB entry from the CPU so we won't allow
* huge and small TLB entries for the same virtual address
* to avoid the risk of CPU bugs in that area.
*/
_pmd = pmdp_clear_flush_notify(vma, address, pmd);
_pmd = pmdp_clear_flush(vma, address, pmd);
spin_unlock(&mm->page_table_lock);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

spin_lock(ptl);
isolated = __collapse_huge_page_isolate(vma, address, pte);
21 changes: 13 additions & 8 deletions trunk/mm/hugetlb.c
@@ -2355,13 +2355,15 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct page *page;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
const unsigned long mmun_start = start; /* For mmu_notifiers */
const unsigned long mmun_end = end; /* For mmu_notifiers */

WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
BUG_ON(end & ~huge_page_mask(h));

tlb_start_vma(tlb, vma);
mmu_notifier_invalidate_range_start(mm, start, end);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
again:
spin_lock(&mm->page_table_lock);
for (address = start; address < end; address += sz) {
@@ -2425,7 +2427,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (address < end && !ref_page)
goto again;
}
mmu_notifier_invalidate_range_end(mm, start, end);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
tlb_end_vma(tlb, vma);
}

@@ -2525,6 +2527,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *old_page, *new_page;
int avoidcopy;
int outside_reserve = 0;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */

old_page = pte_page(pte);

@@ -2611,6 +2615,9 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);

mmun_start = address & huge_page_mask(h);
mmun_end = mmun_start + huge_page_size(h);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
* Retake the page_table_lock to check for racing updates
* before the page tables are altered
@@ -2619,20 +2626,18 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
if (likely(pte_same(huge_ptep_get(ptep), pte))) {
/* Break COW */
mmu_notifier_invalidate_range_start(mm,
address & huge_page_mask(h),
(address & huge_page_mask(h)) + huge_page_size(h));
huge_ptep_clear_flush(vma, address, ptep);
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
hugepage_add_new_anon_rmap(new_page, vma, address);
/* Make the old page be freed below */
new_page = old_page;
mmu_notifier_invalidate_range_end(mm,
address & huge_page_mask(h),
(address & huge_page_mask(h)) + huge_page_size(h));
}
spin_unlock(&mm->page_table_lock);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
/* Caller expects lock to be held */
spin_lock(&mm->page_table_lock);
page_cache_release(new_page);
page_cache_release(old_page);
return 0;
28 changes: 19 additions & 9 deletions trunk/mm/memory.c
@@ -712,7 +712,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
add_taint(TAINT_BAD_PAGE);
}

static inline int is_cow_mapping(vm_flags_t flags)
static inline bool is_cow_mapping(vm_flags_t flags)
{
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
@@ -1039,6 +1039,9 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
unsigned long next;
unsigned long addr = vma->vm_start;
unsigned long end = vma->vm_end;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
bool is_cow;
int ret;

/*
@@ -1072,8 +1075,12 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* parent mm. And a permission downgrade will only happen if
* is_cow_mapping() returns true.
*/
if (is_cow_mapping(vma->vm_flags))
mmu_notifier_invalidate_range_start(src_mm, addr, end);
is_cow = is_cow_mapping(vma->vm_flags);
mmun_start = addr;
mmun_end = end;
if (is_cow)
mmu_notifier_invalidate_range_start(src_mm, mmun_start,
mmun_end);

ret = 0;
dst_pgd = pgd_offset(dst_mm, addr);
@@ -1089,9 +1096,8 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
} while (dst_pgd++, src_pgd++, addr = next, addr != end);

if (is_cow_mapping(vma->vm_flags))
mmu_notifier_invalidate_range_end(src_mm,
vma->vm_start, end);
if (is_cow)
mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
return ret;
}

@@ -2516,7 +2522,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
spinlock_t *ptl, pte_t orig_pte)
__releases(ptl)
{
struct page *old_page, *new_page;
struct page *old_page, *new_page = NULL;
pte_t entry;
int ret = 0;
int page_mkwrite = 0;
@@ -2760,10 +2766,14 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
} else
mem_cgroup_uncharge_page(new_page);

if (new_page)
page_cache_release(new_page);
unlock:
pte_unmap_unlock(page_table, ptl);
if (new_page) {
if (new_page == old_page)
/* cow happened, notify before releasing old_page */
mmu_notifier_invalidate_page(mm, address);
page_cache_release(new_page);
}
if (old_page) {
/*
* Don't let another task, with possibly unlocked vma,
8 changes: 6 additions & 2 deletions trunk/mm/mremap.c
@@ -149,11 +149,15 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long extent, next, old_end;
pmd_t *old_pmd, *new_pmd;
bool need_flush = false;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */

old_end = old_addr + len;
flush_cache_range(vma, old_addr, old_end);

mmu_notifier_invalidate_range_start(vma->vm_mm, old_addr, old_end);
mmun_start = old_addr;
mmun_end = old_end;
mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
cond_resched();
@@ -197,7 +201,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (likely(need_flush))
flush_tlb_range(vma, old_end-len, old_addr);

mmu_notifier_invalidate_range_end(vma->vm_mm, old_end-len, old_end);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

return len + old_addr - old_end; /* how much done */
}
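
In move_page_tables() above, the loop advances old_addr, so the range handed to the notifier is now captured in mmun_start/mmun_end before the loop and reused for the matching end call; previously the start call used old_addr while the end call recomputed the same starting address as old_end - len, which worked but hid the pairing. A condensed sketch of the resulting shape, assumed and simplified (the real function also locates the old and new pmds, handles huge pmds, and tracks need_flush):

#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>

static unsigned long sketch_move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr,
		unsigned long len)
{
	unsigned long extent;
	unsigned long old_end = old_addr + len;
	unsigned long mmun_start = old_addr;	/* For mmu_notifiers */
	unsigned long mmun_end = old_end;	/* For mmu_notifiers */

	flush_cache_range(vma, old_addr, old_end);
	/* Announce the full span once, before the loop mutates old_addr. */
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		extent = PMD_SIZE;
		if (extent > old_end - old_addr)	/* don't overshoot the range */
			extent = old_end - old_addr;
		/* ... move one pmd's worth of ptes from old_addr to new_addr ... */
	}

	/* End call covers exactly the span announced above. */
	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
	return len + old_addr - old_end;	/* how much was done */
}
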
