hugetlb: enforce quotas during reservation for shared mappings
When a MAP_SHARED mmap of a hugetlbfs file succeeds, huge pages are reserved
to guarantee no problems will occur later when instantiating pages.  If quotas
are in force, page instantiation could fail due to a race with another process
or an oversized (but approved) shared mapping.

To prevent these scenarios, debit the quota for the full reservation amount up
front and credit the unused quota when the reservation is released.

Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: Ken Chen <kenchen@google.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: David Gibson <hermes@gibson.dropbear.id.au>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Adam Litke authored and Linus Torvalds committed Nov 15, 2007
1 parent 9a119c0 commit 90d8b7e
Showing 1 changed file with 13 additions and 10 deletions.
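The fix follows a debit-up-front pattern: take the whole quota charge when the reservation is made, and credit back whatever went unused when the reservation is torn down. A minimal user-space sketch of that discipline (all names hypothetical, not the hugetlbfs API):

    /* Sketch only: illustrates the accounting pattern the patch adopts. */
    static long quota_remaining = 100;          /* pages this filesystem may still use */

    static int reserve(long npages)
    {
            if (npages > quota_remaining)
                    return -1;                  /* refuse at mmap() time, not at fault time */
            quota_remaining -= npages;          /* debit the full amount up front */
            return 0;
    }

    static void unreserve(long reserved, long used)
    {
            quota_remaining += reserved - used; /* credit only what was never used */
    }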
mm/hugetlb.c:
@@ -367,21 +367,24 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
         spin_lock(&hugetlb_lock);
         page = dequeue_huge_page(vma, addr);
         spin_unlock(&hugetlb_lock);
-        return page;
+        return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
 
 static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
                                                 unsigned long addr)
 {
         struct page *page = NULL;
 
+        if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
+                return ERR_PTR(-VM_FAULT_SIGBUS);
+
         spin_lock(&hugetlb_lock);
         if (free_huge_pages > resv_huge_pages)
                 page = dequeue_huge_page(vma, addr);
         spin_unlock(&hugetlb_lock);
         if (!page)
                 page = alloc_buddy_huge_page(vma, addr);
-        return page;
+        return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
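Both allocators above now fail with ERR_PTR(-VM_FAULT_OOM) instead of a bare NULL, so callers learn why an allocation failed. The ERR_PTR family from <linux/err.h> packs a small negative code into the pointer value itself; simplified versions of those helpers:

    /* Simplified from include/linux/err.h: the error code rides in the
     * pointer, so one return value is either a valid page or a reason. */
    static inline void *ERR_PTR(long error)     { return (void *) error; }
    static inline long PTR_ERR(const void *ptr) { return (long) ptr; }
    static inline long IS_ERR(const void *ptr)
    {
            /* the top page of the address space is reserved for error values */
            return (unsigned long)ptr >= (unsigned long)-4095;
    }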
@@ -390,19 +393,16 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
         struct page *page;
         struct address_space *mapping = vma->vm_file->f_mapping;
 
-        if (hugetlb_get_quota(mapping, 1))
-                return ERR_PTR(-VM_FAULT_SIGBUS);
-
         if (vma->vm_flags & VM_MAYSHARE)
                 page = alloc_huge_page_shared(vma, addr);
         else
                 page = alloc_huge_page_private(vma, addr);
-        if (page) {
+
+        if (!IS_ERR(page)) {
                 set_page_refcounted(page);
                 set_page_private(page, (unsigned long) mapping);
-                return page;
-        } else
-                return ERR_PTR(-VM_FAULT_OOM);
+        }
+        return page;
 }
 
 static int __init hugetlb_init(void)
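With both branches returning either a valid page or an ERR_PTR value, the IS_ERR() test above replaces the old NULL check, and a caller such as the hugetlb fault handler can recover the fault code in one step. A sketch of the calling convention (not a verbatim excerpt of the fault handler):

        page = alloc_huge_page(vma, address);
        if (IS_ERR(page)) {
                ret = -PTR_ERR(page);   /* VM_FAULT_OOM or VM_FAULT_SIGBUS */
                goto out;
        }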
@@ -1148,6 +1148,8 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
         if (chg < 0)
                 return chg;
 
+        if (hugetlb_get_quota(inode->i_mapping, chg))
+                return -ENOSPC;
         ret = hugetlb_acct_memory(chg);
         if (ret < 0)
                 return ret;
@@ -1158,5 +1160,6 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
         long chg = region_truncate(&inode->i_mapping->private_list, offset);
-        hugetlb_acct_memory(freed - chg);
+        hugetlb_put_quota(inode->i_mapping, (chg - freed));
+        hugetlb_acct_memory(-(chg - freed));
 }
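The new pair of calls credits back exactly the part of the up-front debit that was never consumed. A worked example with hypothetical numbers:

    /* Suppose 10 pages were reserved (and their quota debited) at mmap()
     * time, and only 4 were ever faulted in before the file was truncated.
     * region_truncate() then drops chg = 10 pages from the region map
     * while the caller passes freed = 4, so: */
    hugetlb_put_quota(inode->i_mapping, 10 - 4);    /* credit 6 unused quota units */
    hugetlb_acct_memory(-(10 - 4));                 /* release 6 stale reservations */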
