
Commit

---
r: 217342
b: refs/heads/master
c: bf50bab
h: refs/heads/master
v: v3
Naoya Horiguchi authored and Andi Kleen committed Oct 8, 2010
1 parent 70720f8 commit 2d6e188
Showing 3 changed files with 58 additions and 26 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 998b4382c1d75a6fd3b0e334dae3ab33bd074d99
+refs/heads/master: bf50bab2b34483316162443587b8467952e07730
3 changes: 3 additions & 0 deletions trunk/include/linux/hugetlb.h
@@ -228,6 +228,8 @@ struct huge_bootmem_page {
 	struct hstate *hstate;
 };
 
+struct page *alloc_huge_page_node(struct hstate *h, int nid);
+
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
 
@@ -303,6 +305,7 @@ static inline struct hstate *page_hstate(struct page *page)
 
 #else
 struct hstate {};
+#define alloc_huge_page_node(h, nid) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_vma(v) NULL
79 changes: 54 additions & 25 deletions trunk/mm/hugetlb.c
@@ -466,11 +466,23 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
 	h->free_huge_pages_node[nid]++;
 }
 
+static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
+{
+	struct page *page;
+
+	if (list_empty(&h->hugepage_freelists[nid]))
+		return NULL;
+	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
+	list_del(&page->lru);
+	h->free_huge_pages--;
+	h->free_huge_pages_node[nid]--;
+	return page;
+}
+
 static struct page *dequeue_huge_page_vma(struct hstate *h,
 				struct vm_area_struct *vma,
 				unsigned long address, int avoid_reserve)
 {
-	int nid;
 	struct page *page = NULL;
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
@@ -496,19 +508,13 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						MAX_NR_ZONES - 1, nodemask) {
-		nid = zone_to_nid(zone);
-		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
-		    !list_empty(&h->hugepage_freelists[nid])) {
-			page = list_entry(h->hugepage_freelists[nid].next,
-					  struct page, lru);
-			list_del(&page->lru);
-			h->free_huge_pages--;
-			h->free_huge_pages_node[nid]--;
-
-			if (!avoid_reserve)
-				decrement_hugepage_resv_vma(h, vma);
-
-			break;
+		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
+			page = dequeue_huge_page_node(h, zone_to_nid(zone));
+			if (page) {
+				if (!avoid_reserve)
+					decrement_hugepage_resv_vma(h, vma);
+				break;
+			}
 		}
 	}
 err:
@@ -770,11 +776,10 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 	return ret;
 }
 
-static struct page *alloc_buddy_huge_page(struct hstate *h,
-			struct vm_area_struct *vma, unsigned long address)
+static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 {
 	struct page *page;
-	unsigned int nid;
+	unsigned int r_nid;
 
 	if (h->order >= MAX_ORDER)
 		return NULL;
@@ -812,9 +817,14 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 	}
 	spin_unlock(&hugetlb_lock);
 
-	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
-				__GFP_REPEAT|__GFP_NOWARN,
-				huge_page_order(h));
+	if (nid == NUMA_NO_NODE)
+		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+				__GFP_REPEAT|__GFP_NOWARN,
+				huge_page_order(h));
+	else
+		page = alloc_pages_exact_node(nid,
+			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
 
 	if (page && arch_prepare_hugepage(page)) {
 		__free_pages(page, huge_page_order(h));
@@ -829,13 +839,13 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 		 */
 		put_page_testzero(page);
 		VM_BUG_ON(page_count(page));
-		nid = page_to_nid(page);
+		r_nid = page_to_nid(page);
 		set_compound_page_dtor(page, free_huge_page);
 		/*
 		 * We incremented the global counters already
 		 */
-		h->nr_huge_pages_node[nid]++;
-		h->surplus_huge_pages_node[nid]++;
+		h->nr_huge_pages_node[r_nid]++;
+		h->surplus_huge_pages_node[r_nid]++;
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	} else {
 		h->nr_huge_pages--;
@@ -847,6 +857,25 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 	return page;
 }
 
+/*
+ * This allocation function is useful in the context where vma is irrelevant.
+ * E.g. soft-offlining uses this function because it only cares physical
+ * address of error page.
+ */
+struct page *alloc_huge_page_node(struct hstate *h, int nid)
+{
+	struct page *page;
+
+	spin_lock(&hugetlb_lock);
+	page = dequeue_huge_page_node(h, nid);
+	spin_unlock(&hugetlb_lock);
+
+	if (!page)
+		page = alloc_buddy_huge_page(h, nid);
+
+	return page;
+}
+
 /*
  * Increase the hugetlb pool such that it can accomodate a reservation
  * of size 'delta'.
@@ -871,7 +900,7 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 retry:
 	spin_unlock(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
-		page = alloc_buddy_huge_page(h, NULL, 0);
+		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
 			/*
 			 * We were not able to allocate enough pages to
@@ -1052,7 +1081,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	spin_unlock(&hugetlb_lock);
 
 	if (!page) {
-		page = alloc_buddy_huge_page(h, vma, addr);
+		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
 			hugetlb_put_quota(inode->i_mapping, chg);
 			return ERR_PTR(-VM_FAULT_SIGBUS);
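
For context, a minimal caller sketch (not part of this commit): it shows how a soft-offline style path, which only knows the physical page and has no VMA, might use the new alloc_huge_page_node() helper. The function name new_hugepage_for_soft_offline() is made up for illustration.

/*
 * Hypothetical caller (illustration only, not from this commit): soft
 * offlining knows the bad page, not a VMA, so it asks for a replacement
 * hugepage on the same node.  alloc_huge_page_node() first tries the node's
 * free pool and then falls back to alloc_buddy_huge_page(); it can still
 * return NULL, which the caller must handle.
 */
static struct page *new_hugepage_for_soft_offline(struct page *bad_page)
{
	struct hstate *h = page_hstate(compound_head(bad_page));
	int nid = page_to_nid(bad_page);

	return alloc_huge_page_node(h, nid);	/* may be NULL */
}

Because alloc_buddy_huge_page() passes __GFP_THISNODE when a node is specified, the fallback allocation in this sketch also stays on the requested node.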
