Skip to content

Commit

Permalink
thp: merge page pre-alloc in khugepaged_loop into khugepaged_do_scan
Browse files Browse the repository at this point in the history
There are two pre-alloc operations in these two functions; the difference is:
- it allows sleeping if the page allocation fails in khugepaged_loop
- it exits immediately if the page allocation fails in khugepaged_do_scan

Actually, in khugepaged_do_scan, we can allow the pre-alloc to sleep on
the first failure; the operation in khugepaged_loop can then be removed.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Xiao Guangrong authored and Linus Torvalds committed Oct 9, 2012
1 parent 9817626 commit d516904
Showing 1 changed file with 45 additions and 52 deletions.
97 changes: 45 additions & 52 deletions mm/huge_memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -2222,28 +2222,59 @@ static int khugepaged_wait_event(void)
kthread_should_stop();
}

static void khugepaged_do_scan(struct page **hpage)
/*
 * Sleep for khugepaged_alloc_sleep_millisecs before retrying a failed
 * hugepage allocation.  The wait condition is constant 'false', so the
 * wait always runs to the full timeout; the freezable variant lets the
 * freezer interrupt the sleep during suspend.
 */
static void khugepaged_alloc_sleep(void)
{
wait_event_freezable_timeout(khugepaged_wait, false,
msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}

#ifndef CONFIG_NUMA
/*
 * Allocate a huge page for collapse, retrying while khugepaged stays
 * enabled.  On an allocation failure we sleep at most once: *wait gates
 * the sleep and is cleared after the first failure, so a later failure
 * (with *wait already false) gives up and returns NULL immediately.
 * Allocation outcomes are accounted via the THP_COLLAPSE_ALLOC* vm events.
 */
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *page;

	for (;;) {
		page = alloc_hugepage(khugepaged_defrag());
		if (page) {
			count_vm_event(THP_COLLAPSE_ALLOC);
			break;
		}

		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		if (!*wait)
			return NULL;

		/* One bounded sleep, then retry only if still enabled. */
		*wait = false;
		khugepaged_alloc_sleep();
		if (!khugepaged_enabled())
			break;
	}

	return page;
}
#endif

static void khugepaged_do_scan(void)
{
struct page *hpage = NULL;
unsigned int progress = 0, pass_through_head = 0;
unsigned int pages = khugepaged_pages_to_scan;
bool wait = true;

barrier(); /* write khugepaged_pages_to_scan to local stack */

while (progress < pages) {
cond_resched();

#ifndef CONFIG_NUMA
if (!*hpage) {
*hpage = alloc_hugepage(khugepaged_defrag());
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
if (!hpage)
hpage = khugepaged_alloc_hugepage(&wait);

if (unlikely(!hpage))
break;
#else
if (IS_ERR(hpage)) {
if (!wait)
break;
}
count_vm_event(THP_COLLAPSE_ALLOC);
wait = false;
khugepaged_alloc_sleep();
}
#else
if (IS_ERR(*hpage))
break;
#endif

if (unlikely(kthread_should_stop() || freezing(current)))
Expand All @@ -2255,37 +2286,16 @@ static void khugepaged_do_scan(struct page **hpage)
if (khugepaged_has_work() &&
pass_through_head < 2)
progress += khugepaged_scan_mm_slot(pages - progress,
hpage);
&hpage);
else
progress = pages;
spin_unlock(&khugepaged_mm_lock);
}
}

static void khugepaged_alloc_sleep(void)
{
wait_event_freezable_timeout(khugepaged_wait, false,
msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
if (!IS_ERR_OR_NULL(hpage))
put_page(hpage);
}

#ifndef CONFIG_NUMA
static struct page *khugepaged_alloc_hugepage(void)
{
struct page *hpage;

do {
hpage = alloc_hugepage(khugepaged_defrag());
if (!hpage) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
khugepaged_alloc_sleep();
} else
count_vm_event(THP_COLLAPSE_ALLOC);
} while (unlikely(!hpage) &&
likely(khugepaged_enabled()));
return hpage;
}
#endif

static void khugepaged_wait_work(void)
{
try_to_freeze();
Expand All @@ -2306,25 +2316,8 @@ static void khugepaged_wait_work(void)

static void khugepaged_loop(void)
{
struct page *hpage = NULL;

while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA
hpage = khugepaged_alloc_hugepage();
if (unlikely(!hpage))
break;
#else
if (IS_ERR(hpage)) {
khugepaged_alloc_sleep();
hpage = NULL;
}
#endif

khugepaged_do_scan(&hpage);

if (!IS_ERR_OR_NULL(hpage))
put_page(hpage);

khugepaged_do_scan();
khugepaged_wait_work();
}
}
Expand Down

0 comments on commit d516904

Please sign in to comment.