Commit 7b0041a
---
r: 217346
b: refs/heads/master
c: a9869b8
h: refs/heads/master
v: v3
Naoya Horiguchi authored and Andi Kleen committed Oct 8, 2010
1 parent a812adb commit 7b0041a
Showing 2 changed files with 14 additions and 23 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6de2b1aab94355482bd2accdc115666509667458
+refs/heads/master: a9869b837c098732bad84939015c0eb391b23e41
35 changes: 13 additions & 22 deletions trunk/mm/hugetlb.c
@@ -509,6 +509,7 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 		return NULL;
 	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
 	list_del(&page->lru);
+	set_page_refcounted(page);
 	h->free_huge_pages--;
 	h->free_huge_pages_node[nid]--;
 	return page;
@@ -868,12 +869,6 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 
 	spin_lock(&hugetlb_lock);
 	if (page) {
-		/*
-		 * This page is now managed by the hugetlb allocator and has
-		 * no users -- drop the buddy allocator's reference.
-		 */
-		put_page_testzero(page);
-		VM_BUG_ON(page_count(page));
 		r_nid = page_to_nid(page);
 		set_compound_page_dtor(page, free_huge_page);
 		/*
@@ -936,16 +931,13 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 	spin_unlock(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
-		if (!page) {
+		if (!page)
 			/*
 			 * We were not able to allocate enough pages to
 			 * satisfy the entire reservation so we free what
 			 * we've allocated so far.
 			 */
-			spin_lock(&hugetlb_lock);
-			needed = 0;
 			goto free;
-		}
 
 		list_add(&page->lru, &surplus_list);
 	}
@@ -972,31 +964,31 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 	needed += allocated;
 	h->resv_huge_pages += delta;
 	ret = 0;
-free:
+
+	spin_unlock(&hugetlb_lock);
 	/* Free the needed pages to the hugetlb pool */
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 		if ((--needed) < 0)
 			break;
 		list_del(&page->lru);
+		/*
+		 * This page is now managed by the hugetlb allocator and has
+		 * no users -- drop the buddy allocator's reference.
+		 */
+		put_page_testzero(page);
+		VM_BUG_ON(page_count(page));
 		enqueue_huge_page(h, page);
 	}
 
 	/* Free unnecessary surplus pages to the buddy allocator */
+free:
 	if (!list_empty(&surplus_list)) {
-		spin_unlock(&hugetlb_lock);
 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 			list_del(&page->lru);
-			/*
-			 * The page has a reference count of zero already, so
-			 * call free_huge_page directly instead of using
-			 * put_page. This must be done with hugetlb_lock
-			 * unlocked which is safe because free_huge_page takes
-			 * hugetlb_lock before deciding how to free the page.
-			 */
-			free_huge_page(page);
+			put_page(page);
 		}
-		spin_lock(&hugetlb_lock);
 	}
+	spin_lock(&hugetlb_lock);
 
 	return ret;
 }
@@ -1123,7 +1115,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 		}
 	}
 
-	set_page_refcounted(page);
 	set_page_private(page, (unsigned long) mapping);
 
 	vma_commit_reservation(h, vma, addr);
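For orientation, here is a minimal standalone sketch of the refcounting hand-off this diff establishes: a huge page is handed to the pool only after its last reference has been dropped, and it regains exactly one reference when it is dequeued under the pool lock. This is plain C with pthreads, not kernel code; the names (toy_page, pool_lock, pool_enqueue, pool_dequeue) are invented for illustration.

/*
 * Toy model of the hugetlb free-list refcount protocol after this commit.
 * pool_enqueue() plays the role of the put_page_testzero() + VM_BUG_ON()
 * pair now done in gather_surplus_pages(); pool_dequeue() mirrors the
 * set_page_refcounted() call added to dequeue_huge_page_node().
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct toy_page {
	atomic_int refcount;       /* 1 while owned by a user or the buddy side */
	struct toy_page *next;     /* free-list linkage */
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_page *free_list;

/* Caller hands over its only reference; the page sits at refcount 0 in the pool. */
static void pool_enqueue(struct toy_page *page)
{
	int old = atomic_fetch_sub(&page->refcount, 1);
	assert(old == 1);          /* mirrors put_page_testzero + VM_BUG_ON */

	pthread_mutex_lock(&pool_lock);
	page->next = free_list;
	free_list = page;
	pthread_mutex_unlock(&pool_lock);
}

/* The single reference is re-established while the pool lock is held. */
static struct toy_page *pool_dequeue(void)
{
	pthread_mutex_lock(&pool_lock);
	struct toy_page *page = free_list;
	if (page) {
		free_list = page->next;
		atomic_store(&page->refcount, 1);   /* mirrors set_page_refcounted */
	}
	pthread_mutex_unlock(&pool_lock);
	return page;
}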
