diff --git a/[refs] b/[refs]
index 84436c8e4274..2e19193c4492 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 797df5749032c2286bc7ff3a52de41fde0cdf0a5
+refs/heads/master: 19fc3f0acde32636529969570055c7e2a744787c
diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c
index 3737d82f5225..93ea46a0fba4 100644
--- a/trunk/mm/hugetlb.c
+++ b/trunk/mm/hugetlb.c
@@ -372,11 +372,19 @@ static int gather_surplus_pages(int delta)
 	resv_huge_pages += delta;
 	ret = 0;
 free:
+	/* Free the needed pages to the hugetlb pool */
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+		if ((--needed) < 0)
+			break;
 		list_del(&page->lru);
-		if ((--needed) >= 0)
-			enqueue_huge_page(page);
-		else {
+		enqueue_huge_page(page);
+	}
+
+	/* Free unnecessary surplus pages to the buddy allocator */
+	if (!list_empty(&surplus_list)) {
+		spin_unlock(&hugetlb_lock);
+		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+			list_del(&page->lru);
 			/*
 			 * The page has a reference count of zero already, so
 			 * call free_huge_page directly instead of using
@@ -384,10 +392,9 @@ static int gather_surplus_pages(int delta)
 			 * unlocked which is safe because free_huge_page takes
 			 * hugetlb_lock before deciding how to free the page.
 			 */
-			spin_unlock(&hugetlb_lock);
 			free_huge_page(page);
-			spin_lock(&hugetlb_lock);
 		}
+		spin_lock(&hugetlb_lock);
 	}
 
 	return ret;
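The rewritten free path splits the work into two loops: the first enqueues the needed pages into the hugetlb pool, deleting each from surplus_list as it goes, and the break leaves only the leftover pages on the list, so the second loop returns exactly the unneeded surplus to the buddy allocator. Because free_huge_page() takes hugetlb_lock itself before deciding how to free the page, the lock can be dropped once around the whole second loop rather than being cycled per page, as the removed spin_unlock/spin_lock pair inside the loop used to do. Below is a minimal userspace sketch of that lock-batching pattern; the pthread mutex, pool_lock, and release_page() are illustrative stand-ins for hugetlb_lock and free_huge_page(), not kernel code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Stand-in for free_huge_page(): it takes pool_lock internally,
 * so callers must not hold the lock when calling it.
 */
static void release_page(int id)
{
	pthread_mutex_lock(&pool_lock);
	printf("released page %d\n", id);
	pthread_mutex_unlock(&pool_lock);
}

/* Old pattern: cycle the lock once per page freed. */
static void free_leftovers_cycling(int npages)
{
	pthread_mutex_lock(&pool_lock);
	for (int i = 0; i < npages; i++) {
		pthread_mutex_unlock(&pool_lock);
		release_page(i);
		pthread_mutex_lock(&pool_lock);
	}
	pthread_mutex_unlock(&pool_lock);
}

/* New pattern: drop the lock once around the whole loop. */
static void free_leftovers_batched(int npages)
{
	pthread_mutex_lock(&pool_lock);
	if (npages > 0) {
		pthread_mutex_unlock(&pool_lock);
		for (int i = 0; i < npages; i++)
			release_page(i);
		pthread_mutex_lock(&pool_lock);
	}
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	free_leftovers_cycling(3);
	free_leftovers_batched(3);
	return 0;
}

Both versions are correct, since release_page() synchronizes on its own; the batched form simply cycles the outer lock once for the whole loop instead of once per page, which matters when the surplus list is long and the lock is contended.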