hugepage: fix broken check for offset alignment in hugepage mappings
For hugepage mappings, the file offset, like the address and size, needs to
be aligned to the size of a hugepage.

In commit 68589bc, the check for this was
moved into prepare_hugepage_range() along with the address and size checks.
 But since BenH's rework of the get_unmapped_area() paths leading up to
commit 4b1d892, prepare_hugepage_range()
is only called for MAP_FIXED mappings, not for other mappings.  This means
we're no longer ever checking for an aligned offset - I've confirmed that
mmap() will (apparently) succeed with a misaligned offset on both powerpc
and i386 at least.
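A minimal userspace sketch of the problem (not part of this patch; it
assumes hugetlbfs is mounted at /mnt/huge and that hugepages are 4 MB,
both assumptions to adjust for your setup):

/*
 * The offset below is page-aligned but not hugepage-aligned, so with
 * the check restored the mmap() should fail with EINVAL; with the
 * broken check it (apparently) succeeds.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(4UL * 1024 * 1024)

int main(void)
{
	int fd = open("/mnt/huge/offset-test", O_CREAT | O_RDWR, 0600);
	void *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* half a hugepage in: page-aligned, hugepage-misaligned */
	p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, HPAGE_SIZE / 2);
	if (p == MAP_FAILED)
		perror("mmap");		/* expect EINVAL after this patch */
	else
		printf("mmap succeeded: offset check is missing\n");
	close(fd);
	return 0;
}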

This patch restores the check, removing it from prepare_hugepage_range()
and putting it back into hugetlbfs_file_mmap().  I'm putting it there,
rather than in the get_unmapped_area() path, so that it only needs to go
in one place, rather than separately in the half-dozen or so
arch-specific implementations of hugetlb_get_unmapped_area().
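
For reference, a minimal sketch of the arithmetic behind the restored
check, using assumed example constants (4 KB base pages and 4 MB
hugepages, as on i386 without PAE; the sizes are illustrative, not
from the patch).  vm_pgoff counts the file offset in PAGE_SIZE units,
so it must be a multiple of the number of base pages per hugepage:

#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_SHIFT	22
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

static int pgoff_misaligned(unsigned long pgoff)
{
	/*
	 * ~HPAGE_MASK >> PAGE_SHIFT == 1023 with these constants: the
	 * low bits that must be clear for the offset to land on a
	 * hugepage boundary (a multiple of 1024 base pages = 4 MB).
	 */
	return (pgoff & (~HPAGE_MASK >> PAGE_SHIFT)) != 0;
}

int main(void)
{
	printf("pgoff 1024 (4 MB offset): %s\n",
	       pgoff_misaligned(1024) ? "rejected" : "ok");
	printf("pgoff  512 (2 MB offset): %s\n",
	       pgoff_misaligned(512) ? "rejected" : "ok");
	return 0;
}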

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
David Gibson authored and Linus Torvalds committed Aug 31, 2007
1 parent 4a58448 commit dec4ad8
Showing 5 changed files with 17 additions and 18 deletions.
2 changes: 1 addition & 1 deletion arch/i386/mm/hugetlbpage.c
@@ -367,7 +367,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
6 changes: 2 additions & 4 deletions arch/ia64/mm/hugetlbpage.c
@@ -75,10 +75,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
@@ -151,7 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 
 	/* Handle MAP_FIXED */
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
2 changes: 1 addition & 1 deletion arch/sparc64/mm/hugetlbpage.c
@@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
15 changes: 10 additions & 5 deletions fs/hugetlbfs/inode.c
@@ -82,14 +82,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	int ret;
 
 	/*
-	 * vma alignment has already been checked by prepare_hugepage_range.
-	 * If you add any error returns here, do so after setting VM_HUGETLB,
-	 * so is_vm_hugetlb_page tests below unmap_region go the right way
-	 * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+	 * vma address alignment (but not the pgoff alignment) has
+	 * already been checked by prepare_hugepage_range.  If you add
+	 * any error returns here, do so after setting VM_HUGETLB, so
+	 * is_vm_hugetlb_page tests below unmap_region go the right
+	 * way when do_mmap_pgoff unwinds (may be important on powerpc
+	 * and ia64).
 	 */
 	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
 	vma->vm_ops = &hugetlb_vm_ops;
 
+	if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT))
+		return -EINVAL;
+
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
 	mutex_lock(&inode->i_mutex);
@@ -132,7 +137,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
10 changes: 3 additions & 7 deletions include/linux/hugetlb.h
@@ -66,20 +66,16 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
-						pgoff_t pgoff)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
 		return -EINVAL;
 	return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len,
-						pgoff_t pgoff);
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
@@ -117,7 +113,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)		0
 #define hugetlb_report_node_meminfo(n, buf)	0
 #define follow_huge_pmd(mm, addr, pmd, write)	NULL
-#define prepare_hugepage_range(addr,len,pgoff)	(-EINVAL)
+#define prepare_hugepage_range(addr,len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
