From 32e94f9d99bb124e2b8b7baa089415d367095b85 Mon Sep 17 00:00:00 2001
From: Adam Litke
Date: Wed, 14 Nov 2007 16:59:37 -0800
Subject: [PATCH] --- yaml ---

r: 73696
b: refs/heads/master
c: 348ea204cc23cda35faf962414b674c57da647d7
h: refs/heads/master
v: v3
---
 [refs]             |  2 +-
 trunk/mm/hugetlb.c | 46 +++++++++++++++++++++++++++-------------------
 2 files changed, 28 insertions(+), 20 deletions(-)

diff --git a/[refs] b/[refs]
index 651e25ed0d4e..65cf157bccea 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6c55be8b962f1bdc592d579e81fc27b11ea53dfc
+refs/heads/master: 348ea204cc23cda35faf962414b674c57da647d7
diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c
index e2c80631d36a..f43b3dca12b5 100644
--- a/trunk/mm/hugetlb.c
+++ b/trunk/mm/hugetlb.c
@@ -353,35 +353,43 @@ void return_unused_surplus_pages(unsigned long unused_resv_pages)
 	}
 }
 
-static struct page *alloc_huge_page(struct vm_area_struct *vma,
-				    unsigned long addr)
+
+static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
+						unsigned long addr)
 {
-	struct page *page = NULL;
-	int use_reserved_page = vma->vm_flags & VM_MAYSHARE;
+	struct page *page;
 
 	spin_lock(&hugetlb_lock);
-	if (!use_reserved_page && (free_huge_pages <= resv_huge_pages))
-		goto fail;
-
 	page = dequeue_huge_page(vma, addr);
-	if (!page)
-		goto fail;
-
 	spin_unlock(&hugetlb_lock);
-	set_page_refcounted(page);
 	return page;
+}
 
-fail:
-	spin_unlock(&hugetlb_lock);
+static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
+						unsigned long addr)
+{
+	struct page *page = NULL;
 
-	/*
-	 * Private mappings do not use reserved huge pages so the allocation
-	 * may have failed due to an undersized hugetlb pool.  Try to grab a
-	 * surplus huge page from the buddy allocator.
-	 */
-	if (!use_reserved_page)
+	spin_lock(&hugetlb_lock);
+	if (free_huge_pages > resv_huge_pages)
+		page = dequeue_huge_page(vma, addr);
+	spin_unlock(&hugetlb_lock);
+	if (!page)
 		page = alloc_buddy_huge_page(vma, addr);
+	return page;
+}
 
+static struct page *alloc_huge_page(struct vm_area_struct *vma,
+				    unsigned long addr)
+{
+	struct page *page;
+
+	if (vma->vm_flags & VM_MAYSHARE)
+		page = alloc_huge_page_shared(vma, addr);
+	else
+		page = alloc_huge_page_private(vma, addr);
+	if (page)
+		set_page_refcounted(page);
 	return page;
 }
 