Commit

---
r: 320868
b: refs/heads/master
c: d833352
h: refs/heads/master
v: v3
Mel Gorman authored and Linus Torvalds committed Aug 1, 2012
1 parent d5e8dbb commit 882aa06
Showing 4 changed files with 39 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 09c231cb8bfdc35e7d896850d34440b8553b084f
+refs/heads/master: d833352a4338dc31295ed832a30c9ccff5c7a183
11 changes: 11 additions & 0 deletions trunk/include/linux/hugetlb.h
@@ -48,6 +48,10 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 			unsigned long *, int *, int, unsigned int flags);
 void unmap_hugepage_range(struct vm_area_struct *,
 			unsigned long, unsigned long, struct page *);
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			  struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end,
+			  struct page *ref_page);
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page);
@@ -130,6 +134,13 @@ static inline void copy_huge_page(struct page *dst, struct page *src)
 
 #define hugetlb_change_protection(vma, address, end, newprot)
 
+static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, struct page *ref_page)
+{
+	BUG();
+}
+
 static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 			struct vm_area_struct *vma, unsigned long start,
 			unsigned long end, struct page *ref_page)
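For orientation, a minimal caller sketch of the new entry point (hypothetical and condensed; the real call site is the mm/memory.c hunk at the bottom of this diff, and the helper name here is illustrative only). The final unmap of a hugetlbfs VMA runs under i_mmap_mutex and goes through __unmap_hugepage_range_final(), which lets VM_MAYSHARE be cleared before the lock is dropped:

#include <linux/mm.h>
#include <linux/hugetlb.h>

static void teardown_hugetlb_vma(struct mmu_gather *tlb,
				 struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	/* i_mmap_mutex serialises against huge_pmd_share() walking the file's VMAs */
	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
	/* unmaps the range, then clears VM_MAYSHARE while the mutex is still held */
	__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
}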
28 changes: 26 additions & 2 deletions trunk/mm/hugetlb.c
@@ -2429,6 +2429,25 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	tlb_end_vma(tlb, vma);
 }
 
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			  struct vm_area_struct *vma, unsigned long start,
+			  unsigned long end, struct page *ref_page)
+{
+	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
+
+	/*
+	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
+	 * test will fail on a vma being torn down, and not grab a page table
+	 * on its way out. We're lucky that the flag has such an appropriate
+	 * name, and can in fact be safely cleared here. We could clear it
+	 * before the __unmap_hugepage_range above, but all that's necessary
+	 * is to clear it before releasing the i_mmap_mutex. This works
+	 * because in the context this is called, the VMA is about to be
+	 * destroyed and the i_mmap_mutex is held.
+	 */
+	vma->vm_flags &= ~VM_MAYSHARE;
+}
+
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
@@ -3012,9 +3031,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+	/*
+	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+	 * may have cleared our pud entry and done put_page on the page table:
+	 * once we release i_mmap_mutex, another task can do the final put_page
+	 * and that page table be reused and filled with junk.
+	 */
 	flush_tlb_range(vma, start, end);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
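To spell out the second hunk above, a condensed before/after view of the tail of hugetlb_change_protection() (paraphrased from the hunk; the protection-change loop is elided). The only change is ordering: the TLB flush moves in front of the i_mmap_mutex release, so the shared page table cannot reach its final put_page() and be reused before this CPU has flushed:

	/* Before (racy): once i_mmap_mutex is dropped, another task can do the
	 * final put_page() on the shared page table, and the freed page can be
	 * reused and filled with junk before the flush below has happened. */
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
	flush_tlb_range(vma, start, end);

	/* After: flush while i_mmap_mutex still holds off the final put_page(). */
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);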
2 changes: 1 addition & 1 deletion trunk/mm/memory.c
@@ -1345,7 +1345,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 		 */
 		if (vma->vm_file) {
 			mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
-			__unmap_hugepage_range(tlb, vma, start, end, NULL);
+			__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
 			mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 		}
 	} else
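Finally, a rough sketch of the check that the comment in __unmap_hugepage_range_final() relies on. This is a simplification, not the actual arch/x86/mm/hugetlbpage.c code (which also matches file offsets and PUD-sized alignment), and the function name is illustrative: huge PMD sharing on x86 only pairs up VMAs whose flags match and which still allow sharing, so clearing VM_MAYSHARE on a VMA being torn down makes it an ineligible sharing partner on its way out:

#include <linux/mm.h>

/*
 * Simplified stand-in for the page_table_shareable() test used by
 * huge_pmd_share(). After __unmap_hugepage_range_final() has cleared
 * VM_MAYSHARE on the dying VMA, this returns 0 for it, so a concurrent
 * fault cannot take a reference to its page table during teardown.
 */
static int hugetlb_vma_flags_shareable(struct vm_area_struct *svma,
				       struct vm_area_struct *vma)
{
	return svma->vm_flags == vma->vm_flags &&
	       (vma->vm_flags & VM_MAYSHARE);
}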
