diff --git a/[refs] b/[refs]
index eac25f624d59..99e1e2675f31 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 70da2340fbc68e91e701762f785479ab495a0869
+refs/heads/master: 4eb2b1dcd598f8489130405c81c60c289896d92a
diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c
index 2ef66a2a148d..6df8065039eb 100644
--- a/trunk/mm/hugetlb.c
+++ b/trunk/mm/hugetlb.c
@@ -2237,6 +2237,12 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		+ (vma->vm_pgoff >> PAGE_SHIFT);
 	mapping = (struct address_space *)page_private(page);
 
+	/*
+	 * Take the mapping lock for the duration of the table walk. As
+	 * this mapping should be shared between all the VMAs,
+	 * __unmap_hugepage_range() is called as the lock is already held
+	 */
+	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		/* Do not unmap the current VMA */
 		if (iter_vma == vma)
@@ -2250,10 +2256,11 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * from the time of fork. This would look like data corruption
 		 */
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
-			unmap_hugepage_range(iter_vma,
+			__unmap_hugepage_range(iter_vma,
 				address, address + huge_page_size(h), page);
 	}
+	spin_unlock(&mapping->i_mmap_lock);
 
 	return 1;
 }
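
The detail worth noting in this patch is the switch from unmap_hugepage_range() to its double-underscore variant. In kernels of this vintage, the public unmap_hugepage_range() acquires mapping->i_mmap_lock itself before delegating to __unmap_hugepage_range(), so once the patch holds that lock across the whole prio-tree walk, the walk must call the inner helper directly; calling the public wrapper with the lock already held would self-deadlock on the non-recursive spinlock. The user-space sketch below illustrates that locked-wrapper/unlocked-helper idiom; the names (map_lock, unmap_range, unmap_all_sharers) are illustrative stand-ins, not kernel APIs.

/*
 * Minimal user-space sketch of the idiom in the patch: a public
 * function takes the lock and delegates to a __helper that assumes
 * the lock is already held. Code that already holds the lock (like
 * the patched prio-tree walk) must call the __helper directly.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Inner helper: caller must already hold map_lock. */
static void __unmap_range(int start, int end)
{
	printf("unmapping [%d, %d) with lock held\n", start, end);
}

/* Public wrapper: takes the lock itself, then delegates. */
static void unmap_range(int start, int end)
{
	pthread_mutex_lock(&map_lock);
	__unmap_range(start, end);
	pthread_mutex_unlock(&map_lock);
}

/*
 * Analogue of the patched walk: hold the lock across the whole loop
 * and call the __helper for each sharer. Calling unmap_range() here
 * instead would deadlock on the non-recursive mutex.
 */
static void unmap_all_sharers(void)
{
	pthread_mutex_lock(&map_lock);
	for (int i = 0; i < 3; i++)
		__unmap_range(i * 10, i * 10 + 10);
	pthread_mutex_unlock(&map_lock);
}

int main(void)
{
	unmap_range(0, 10);	/* single unmap: wrapper takes the lock */
	unmap_all_sharers();	/* batch unmap: lock held across the walk */
	return 0;
}

Holding the lock for the duration of the walk is also the substance of the fix itself: it keeps the i_mmap prio tree stable against concurrent VMA insertion and removal while the sharing VMAs are being unmapped, rather than taking and dropping the lock once per VMA inside the loop.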