[PATCH] hugepage allocator cleanup

Insert "fresh" huge pages into the hugepage allocator by the same means as
they are freed back into it.  This reduces code size and allows
enqueue_huge_page to be inlined into the hugepage free fastpath.
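
Background sketch, not part of the patch: the "same means" is put_page() on
a compound page, which on the last reference invokes the destructor stashed
in page[1].lru.next -- here free_huge_page.  The toy user-space model below
(made-up stand-ins for struct page, put_page() and the freelist; not kernel
code) shows why enqueue_huge_page ends up with a single call site:

/* Toy model: a "page" carries a refcount and a destructor pointer
 * (standing in for page[1].lru.next); dropping the last reference
 * runs the dtor, which enqueues the page on the freelist.  Fresh
 * pages are "freed into" the allocator the same way. */
#include <stdio.h>
#include <stdlib.h>

struct page {
        int count;                      /* models the page refcount */
        void (*dtor)(struct page *);    /* models page[1].lru.next */
        struct page *next_free;         /* freelist linkage */
};

static struct page *freelist;

static void enqueue_huge_page(struct page *page)
{
        page->next_free = freelist;
        freelist = page;
}

static void free_huge_page(struct page *page)   /* the dtor */
{
        enqueue_huge_page(page);                /* sole call site */
}

static void put_page(struct page *page)
{
        if (--page->count == 0)
                page->dtor(page);
}

static int alloc_fresh_huge_page(void)
{
        struct page *page = calloc(1, sizeof(*page));

        if (!page)
                return 0;
        page->count = 1;
        page->dtor = free_huge_page;    /* set dtor */
        put_page(page);                 /* free it into the allocator */
        return 1;
}

int main(void)
{
        if (alloc_fresh_huge_page())
                printf("on freelist, refcount %d\n", freelist->count);
        return 0;
}

After alloc_fresh_huge_page() the page sits on the freelist with refcount
zero, which is exactly the invariant the next paragraph is about.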

Eliminate occurrences of hugepages on the free list with non-zero refcount.
This permits stricter refcount checks in future.  It is also required for
the lockless pagecache.
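
The lockless pagecache connection, sketched under assumptions: lockless
lookups take speculative references with get_page_unless_zero() (an atomic
increment-if-not-zero, atomic_inc_not_zero() in the kernel) and treat
failure as "this page is being freed".  The user-space model below, again
with a made-up struct page, shows the primitive:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
        atomic_int count;               /* models the page refcount */
};

/* Pin the page only if its refcount is currently non-zero; failure
 * means the page is free (or being freed) and must not be touched. */
static bool get_page_unless_zero(struct page *page)
{
        int old = atomic_load(&page->count);

        while (old != 0) {
                /* On failure the CAS reloads 'old' and we retry. */
                if (atomic_compare_exchange_weak(&page->count, &old, old + 1))
                        return true;
        }
        return false;
}

int main(void)
{
        struct page free_page = { .count = 0 };  /* as on the freelist */
        struct page used_page = { .count = 2 };

        printf("free page pinned? %d\n", get_page_unless_zero(&free_page));
        printf("used page pinned? %d\n", get_page_unless_zero(&used_page));
        return 0;
}

If a page could sit on the hugepage freelist with non-zero refcount, such a
lookup could pin a page the allocator believes is free -- hence the stricter
invariant above.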

Signed-off-by: Nick Piggin <npiggin@suse.de>

"This patch also eliminates a leak "cleaned up" by re-clobbering the
refcount on every allocation from the hugepage freelists.  With respect to
the lockless pagecache, the crucial aspect is to eliminate unconditional
set_page_count() to 0 on pages with potentially nonzero refcounts, though
closer inspection suggests the assignments removed are entirely spurious."
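
A hypothetical interleaving (illustration only, not kernel code) of why an
unconditional set_page_count() is unsafe once such speculative readers
exist:

/*
 * CPU0 (speculative lookup)        CPU1 (old update_and_free_page)
 * -------------------------        -------------------------------
 * reads count == 1
 * CAS count: 1 -> 2 (pinned)
 *                                  set_page_count(&page[i], 0);
 *                                  (plain store: CPU0's pin is
 *                                   silently destroyed)
 * dereferences page ...            page is reused underneath CPU0
 *
 * With the spurious assignments removed, refcounts move only via
 * atomic inc/dec, so a concurrent pin cannot be wiped out.
 */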

Acked-by: William Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Nick Piggin authored and Linus Torvalds committed Mar 22, 2006
1 parent 545b1ea commit a482289
Showing 1 changed file with 8 additions and 16 deletions.
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -64,28 +64,30 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
         return page;
 }
 
-static struct page *alloc_fresh_huge_page(void)
+static int alloc_fresh_huge_page(void)
 {
         static int nid = 0;
         struct page *page;
         page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
                         HUGETLB_PAGE_ORDER);
         nid = (nid + 1) % num_online_nodes();
         if (page) {
+                page[1].lru.next = (void *)free_huge_page;      /* dtor */
                 spin_lock(&hugetlb_lock);
                 nr_huge_pages++;
                 nr_huge_pages_node[page_to_nid(page)]++;
                 spin_unlock(&hugetlb_lock);
+                put_page(page); /* free it into the hugepage allocator */
+                return 1;
         }
-        return page;
+        return 0;
 }
 
 void free_huge_page(struct page *page)
 {
         BUG_ON(page_count(page));
 
         INIT_LIST_HEAD(&page->lru);
-        page[1].lru.next = NULL;        /* reset dtor */
 
         spin_lock(&hugetlb_lock);
         enqueue_huge_page(page);
@@ -105,7 +107,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
         }
         spin_unlock(&hugetlb_lock);
         set_page_count(page, 1);
-        page[1].lru.next = (void *)free_huge_page;      /* set dtor */
         for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
                 clear_user_highpage(&page[i], addr);
         return page;
@@ -114,7 +115,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 static int __init hugetlb_init(void)
 {
         unsigned long i;
-        struct page *page;
 
         if (HPAGE_SHIFT == 0)
                 return 0;
@@ -123,12 +123,8 @@ static int __init hugetlb_init(void)
                 INIT_LIST_HEAD(&hugepage_freelists[i]);
 
         for (i = 0; i < max_huge_pages; ++i) {
-                page = alloc_fresh_huge_page();
-                if (!page)
+                if (!alloc_fresh_huge_page())
                         break;
-                spin_lock(&hugetlb_lock);
-                enqueue_huge_page(page);
-                spin_unlock(&hugetlb_lock);
         }
         max_huge_pages = free_huge_pages = nr_huge_pages = i;
         printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
@@ -154,8 +150,8 @@ static void update_and_free_page(struct page *page)
                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                                 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                 1 << PG_private | 1<< PG_writeback);
-                set_page_count(&page[i], 0);
         }
+        page[1].lru.next = NULL;
         set_page_count(page, 1);
         __free_pages(page, HUGETLB_PAGE_ORDER);
 }
@@ -188,12 +184,8 @@ static inline void try_to_free_low(unsigned long count)
 static unsigned long set_max_huge_pages(unsigned long count)
 {
         while (count > nr_huge_pages) {
-                struct page *page = alloc_fresh_huge_page();
-                if (!page)
+                if (!alloc_fresh_huge_page())
                         return nr_huge_pages;
-                spin_lock(&hugetlb_lock);
-                enqueue_huge_page(page);
-                spin_unlock(&hugetlb_lock);
         }
         if (count >= nr_huge_pages)
                 return nr_huge_pages;
