Commit

r: 332370
b: refs/heads/master
c: 26234f3
h: refs/heads/master
v: v3
Xiao Guangrong authored and Linus Torvalds committed Oct 9, 2012
1 parent 3932df6 commit d7163cf
Showing 2 changed files with 99 additions and 69 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 420256ef02660af0acf28c12fe4b7d514ca88a4d
+refs/heads/master: 26234f36ef3ec7efcfa9acb181427849c1f9db7c
166 changes: 98 additions & 68 deletions trunk/mm/huge_memory.c
@@ -1827,28 +1827,34 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 	}
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-			       unsigned long address,
-			       struct page **hpage,
-			       struct vm_area_struct *vma,
-			       int node)
+static void khugepaged_alloc_sleep(void)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd, _pmd;
-	pte_t *pte;
-	pgtable_t pgtable;
-	struct page *new_page;
-	spinlock_t *ptl;
-	int isolated;
-	unsigned long hstart, hend;
+	wait_event_freezable_timeout(khugepaged_wait, false,
+			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
 
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#ifndef CONFIG_NUMA
-	up_read(&mm->mmap_sem);
-	VM_BUG_ON(!*hpage);
-	new_page = *hpage;
-#else
+#ifdef CONFIG_NUMA
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (IS_ERR(*hpage)) {
+		if (!*wait)
+			return false;
+
+		*wait = false;
+		khugepaged_alloc_sleep();
+	} else if (*hpage) {
+		put_page(*hpage);
+		*hpage = NULL;
+	}
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
 	VM_BUG_ON(*hpage);
 	/*
 	 * Allocate the page while the vma is still valid and under
@@ -1860,23 +1866,89 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * mmap_sem in read mode is good idea also to allow greater
 	 * scalability.
 	 */
-	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+	*hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
 
 	/*
 	 * After allocating the hugepage, release the mmap_sem read lock in
 	 * preparation for taking it in write mode.
 	 */
 	up_read(&mm->mmap_sem);
-	if (unlikely(!new_page)) {
+	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
-		return;
+		return NULL;
 	}
-	*hpage = new_page;
 
 	count_vm_event(THP_COLLAPSE_ALLOC);
+	return *hpage;
+}
+#else
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+			if (!*wait)
+				return NULL;
+
+			*wait = false;
+			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
+	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+	return hpage;
+}
+
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (!*hpage)
+		*hpage = khugepaged_alloc_hugepage(wait);
+
+	if (unlikely(!*hpage))
+		return false;
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
+	up_read(&mm->mmap_sem);
+	VM_BUG_ON(!*hpage);
+	return *hpage;
+}
+#endif
+
+static void collapse_huge_page(struct mm_struct *mm,
+			       unsigned long address,
+			       struct page **hpage,
+			       struct vm_area_struct *vma,
+			       int node)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, _pmd;
+	pte_t *pte;
+	pgtable_t pgtable;
+	struct page *new_page;
+	spinlock_t *ptl;
+	int isolated;
+	unsigned long hstart, hend;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	/* release the mmap_sem read lock. */
+	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+	if (!new_page)
+		return;
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
 		return;
 
@@ -2215,34 +2287,6 @@ static int khugepaged_wait_event(void)
 		kthread_should_stop();
 }
 
-static void khugepaged_alloc_sleep(void)
-{
-	wait_event_freezable_timeout(khugepaged_wait, false,
-			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
-}
-
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(bool *wait)
-{
-	struct page *hpage;
-
-	do {
-		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage) {
-			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-			if (!*wait)
-				return NULL;
-
-			*wait = false;
-			khugepaged_alloc_sleep();
-		} else
-			count_vm_event(THP_COLLAPSE_ALLOC);
-	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
-
-	return hpage;
-}
-#endif
-
 static void khugepaged_do_scan(void)
 {
 	struct page *hpage = NULL;
@@ -2253,23 +2297,9 @@ static void khugepaged_do_scan(void)
 	barrier(); /* write khugepaged_pages_to_scan to local stack */
 
 	while (progress < pages) {
-#ifndef CONFIG_NUMA
-		if (!hpage)
-			hpage = khugepaged_alloc_hugepage(&wait);
-
-		if (unlikely(!hpage))
+		if (!khugepaged_prealloc_page(&hpage, &wait))
 			break;
-#else
-		if (IS_ERR(hpage)) {
-			if (!wait)
-				break;
-			wait = false;
-			khugepaged_alloc_sleep();
-		} else if (hpage) {
-			put_page(hpage);
-			hpage = NULL;
-		}
-#endif
 
 		cond_resched();
 
 		if (unlikely(kthread_should_stop() || freezing(current)))
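For orientation: after this patch, khugepaged_do_scan() drives allocation through just two hooks, khugepaged_prealloc_page() and khugepaged_alloc_page(), with the CONFIG_NUMA and !CONFIG_NUMA policies hidden behind them. The following is a minimal userspace C sketch of that contract, not kernel code: struct page, alloc_hugepage() and khugepaged_alloc_sleep() are mock stand-ins, and the vm-event counters and khugepaged_enabled() check are omitted.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page { char data[1]; };	/* mock; stands in for the kernel's struct page */

static struct page *alloc_hugepage(void)
{
	return malloc(sizeof(struct page));	/* may fail, returning NULL */
}

static void khugepaged_alloc_sleep(void)
{
	puts("alloc failed; would sleep khugepaged_alloc_sleep_millisecs");
}

/* !CONFIG_NUMA flavour: retry once around a sleep, as in the patch */
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage();
		if (!hpage) {
			if (!*wait)
				return NULL;
			*wait = false;
			khugepaged_alloc_sleep();
		}
	} while (!hpage);

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	return *hpage != NULL;
}

int main(void)
{
	struct page *hpage = NULL;
	bool wait = true;
	int progress;

	/* mirrors the loop shape in khugepaged_do_scan() above */
	for (progress = 0; progress < 4; progress++) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;
		printf("scan pass %d using preallocated page %p\n",
		       progress, (void *)hpage);
	}
	free(hpage);
	return 0;
}

Both configurations now fail the same way (khugepaged_prealloc_page() returning false ends the scan round), which is what lets the #ifdef blocks disappear from khugepaged_do_scan().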
