Commit 6f2d4f8
---
r: 105284
b: refs/heads/master
c: fc1b8a7
h: refs/heads/master
v: v3
Mel Gorman authored and Linus Torvalds committed Jul 24, 2008
1 parent 90f40f0 commit 6f2d4f8
Showing 2 changed files with 42 additions and 42 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9109fb7b3520de187ebc3646c209d66a233f7169
+refs/heads/master: fc1b8a73dd71226902a11928dd5500326e101df9
82 changes: 41 additions & 41 deletions trunk/mm/hugetlb.c
@@ -716,6 +716,47 @@ unsigned long hugetlb_total_pages(void)
 	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
 }
 
+static int hugetlb_acct_memory(long delta)
+{
+	int ret = -ENOMEM;
+
+	spin_lock(&hugetlb_lock);
+	/*
+	 * When cpusets are configured, they break strict hugetlb page
+	 * reservation because the accounting is done on a global variable.
+	 * Such a reservation is meaningless in the presence of cpusets:
+	 * it is never checked against page availability for the current
+	 * cpuset, so the kernel can still OOM-kill an application when
+	 * the cpuset its task runs in has no free hugetlb pages. Enforcing
+	 * strict accounting per cpuset is nearly impossible (or too ugly)
+	 * because cpusets are fluid: tasks and memory nodes can be moved
+	 * between them dynamically.
+	 *
+	 * Changing the semantics of shared hugetlb mappings under cpusets
+	 * is undesirable. However, to preserve some of the old semantics,
+	 * we fall back to checking current free page availability as a
+	 * best-effort attempt, minimizing the impact of the semantics
+	 * change that cpusets would otherwise cause.
+	 */
+	if (delta > 0) {
+		if (gather_surplus_pages(delta) < 0)
+			goto out;
+
+		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
+			return_unused_surplus_pages(delta);
+			goto out;
+		}
+	}
+
+	ret = 0;
+	if (delta < 0)
+		return_unused_surplus_pages((unsigned long) -delta);
+
+out:
+	spin_unlock(&hugetlb_lock);
+	return ret;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all. They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -1248,47 +1289,6 @@ static long region_truncate(struct list_head *head, long end)
 	return chg;
 }
 
-static int hugetlb_acct_memory(long delta)
-{
-	int ret = -ENOMEM;
-
-	spin_lock(&hugetlb_lock);
-	/*
-	 * When cpusets are configured, they break strict hugetlb page
-	 * reservation because the accounting is done on a global variable.
-	 * Such a reservation is meaningless in the presence of cpusets:
-	 * it is never checked against page availability for the current
-	 * cpuset, so the kernel can still OOM-kill an application when
-	 * the cpuset its task runs in has no free hugetlb pages. Enforcing
-	 * strict accounting per cpuset is nearly impossible (or too ugly)
-	 * because cpusets are fluid: tasks and memory nodes can be moved
-	 * between them dynamically.
-	 *
-	 * Changing the semantics of shared hugetlb mappings under cpusets
-	 * is undesirable. However, to preserve some of the old semantics,
-	 * we fall back to checking current free page availability as a
-	 * best-effort attempt, minimizing the impact of the semantics
-	 * change that cpusets would otherwise cause.
-	 */
-	if (delta > 0) {
-		if (gather_surplus_pages(delta) < 0)
-			goto out;
-
-		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
-			return_unused_surplus_pages(delta);
-			goto out;
-		}
-	}
-
-	ret = 0;
-	if (delta < 0)
-		return_unused_surplus_pages((unsigned long) -delta);
-
-out:
-	spin_unlock(&hugetlb_lock);
-	return ret;
-}
-
 int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 {
 	long ret, chg;
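For reference, below is a minimal, self-contained userspace sketch of the reserve/release control flow that the relocated hugetlb_acct_memory() implements. The two-node free_huge_pages_node table, the always-succeeding gather_surplus_pages() stub, and MAX_NODES are invented stand-ins for kernel state, and the hugetlb_lock spinlock is omitted; this models only the accounting flow shown in the diff, not the real kernel helpers.

/* Simplified userspace model of the accounting in hugetlb_acct_memory().
 * All kernel state and helpers are replaced with hypothetical stubs. */
#include <stdio.h>

#define MAX_NODES 2

/* Stand-in for the kernel's per-node free hugetlb page counters. */
static long free_huge_pages_node[MAX_NODES] = { 4, 2 };
static long surplus_pages;

/* Model of cpuset_mems_nr(): sum a per-node counter over the nodes the
 * current task's cpuset allows (here: all nodes). */
static long cpuset_mems_nr(const long *per_node)
{
	long total = 0;
	for (int node = 0; node < MAX_NODES; node++)
		total += per_node[node];
	return total;
}

/* Model of gather_surplus_pages(): pretend the allocator can always
 * grow the surplus pool by the requested amount. */
static int gather_surplus_pages(long delta)
{
	surplus_pages += delta;
	return 0;
}

/* Model of return_unused_surplus_pages(): shrink the surplus pool. */
static void return_unused_surplus_pages(long delta)
{
	if (delta > surplus_pages)
		delta = surplus_pages;
	surplus_pages -= delta;
}

/* Same shape as the kernel function: reserve on delta > 0, failing if
 * the modelled cpuset cannot cover the request; release on delta < 0. */
static int hugetlb_acct_memory(long delta)
{
	int ret = -1; /* stands in for -ENOMEM */

	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		/* The best-effort cpuset check from the comment above:
		 * refuse reservations larger than the free pages visible
		 * to the current cpuset, undoing the surplus growth. */
		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
			return_unused_surplus_pages(delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(-delta);

out:
	return ret;
}

int main(void)
{
	printf("reserve 4:  %d\n", hugetlb_acct_memory(4));  /* 0: fits in 6 */
	printf("reserve 10: %d\n", hugetlb_acct_memory(10)); /* -1: exceeds 6 */
	printf("release 4:  %d\n", hugetlb_acct_memory(-4)); /* 0: always ok */
	return 0;
}

Built with any C99 compiler, the sketch prints 0, -1, 0: a reservation of 4 fits within the 6 pages the modelled cpuset can see, a reservation of 10 does not and is rolled back, and the release path always succeeds.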
