---
yaml
---
r: 233925
b: refs/heads/master
c: 5c4b4be
h: refs/heads/master
i:
  233923: 4b86eba
v: v3
Andi Kleen authored and Linus Torvalds committed Mar 5, 2011
1 parent 047980b commit 069cf5a
Showing 3 changed files with 20 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 19ee151e140daa5183c4984981801e542e0544fb
+refs/heads/master: 5c4b4be3b6b937256103a5ae49177e0c3a17cb8f
24 changes: 17 additions & 7 deletions trunk/mm/huge_memory.c
@@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag)
 
 static inline struct page *alloc_hugepage_vma(int defrag,
                                               struct vm_area_struct *vma,
-                                              unsigned long haddr)
+                                              unsigned long haddr, int nd)
 {
         return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
-                               HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
+                               HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
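
The hunk above is the mechanical heart of the patch: alloc_hugepage_vma() stops sampling numa_node_id() itself and instead takes the node as a parameter, so a caller that is not running on the node that owns the memory (khugepaged, further down) can say where the huge page should land. A minimal userspace analogue of that distinction, with sched_getcpu() standing in for numa_node_id() and every name invented for illustration:

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>

/* Old shape: the helper decides, based on whichever CPU runs the caller. */
static int pick_node_implicit(void)
{
        return sched_getcpu();  /* userspace stand-in for numa_node_id() */
}

/* New shape: the caller passes in the node the data actually lives on. */
static int pick_node_explicit(int nd)
{
        return nd;
}

int main(void)
{
        int data_node = 2;      /* invented: node backing the pages being scanned */

        /* A scanner thread may run anywhere, so these two can disagree. */
        printf("implicit: node of this thread's CPU = %d\n", pick_node_implicit());
        printf("explicit: node of the data = %d\n", pick_node_explicit(data_node));
        return 0;
}

In the fault paths below, the two coincide (the faulting task is running locally), which is why those call sites can simply pass numa_node_id(); khugepaged is the case where they diverge.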
@@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
         if (unlikely(khugepaged_enter(vma)))
                 return VM_FAULT_OOM;
         page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                  vma, haddr);
+                                  vma, haddr, numa_node_id());
         if (unlikely(!page))
                 goto out;
         if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         if (transparent_hugepage_enabled(vma) &&
             !transparent_hugepage_debug_cow())
                 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                              vma, haddr);
+                                              vma, haddr, numa_node_id());
         else
                 new_page = NULL;
 
@@ -1745,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 static void collapse_huge_page(struct mm_struct *mm,
                                unsigned long address,
                                struct page **hpage,
-                               struct vm_area_struct *vma)
+                               struct vm_area_struct *vma,
+                               int node)
 {
         pgd_t *pgd;
         pud_t *pud;
@@ -1773,7 +1774,8 @@ static void collapse_huge_page(struct mm_struct *mm,
          * mmap_sem in read mode is good idea also to allow greater
          * scalability.
          */
-        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
+        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+                                      node);
         if (unlikely(!new_page)) {
                 up_read(&mm->mmap_sem);
                 *hpage = ERR_PTR(-ENOMEM);
@@ -1919,6 +1921,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
         struct page *page;
         unsigned long _address;
         spinlock_t *ptl;
+        int node = -1;
 
         VM_BUG_ON(address & ~HPAGE_PMD_MASK);

@@ -1949,6 +1952,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 page = vm_normal_page(vma, _address, pteval);
                 if (unlikely(!page))
                         goto out_unmap;
+                /*
+                 * Choose the node of the first page. This could
+                 * be more sophisticated and look at more pages,
+                 * but isn't for now.
+                 */
+                if (node == -1)
+                        node = page_to_nid(page);
                 VM_BUG_ON(PageCompound(page));
                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                         goto out_unmap;
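
The added comment flags its own simplification: only the first base page's node is consulted. Purely as a hypothetical sketch, not part of this commit, a refinement in the direction the comment points at would tally the node of every scanned page and collapse onto the most common one; the function and inputs below are invented for the example:

#include <stdio.h>

#define MAX_NUMNODES 8  /* illustrative; the kernel's value is config-dependent */

/* Pick the node backing the most base pages under the candidate PMD. */
static int majority_node(const int *nids, int n)
{
        int count[MAX_NUMNODES] = { 0 };
        int best = 0, i;

        for (i = 0; i < n; i++)
                count[nids[i]]++;
        for (i = 1; i < MAX_NUMNODES; i++)
                if (count[i] > count[best])
                        best = i;
        return best;
}

int main(void)
{
        /* In the kernel these would come from page_to_nid() on each PTE's page. */
        int nids[] = { 0, 0, 1, 0, 1, 1, 1 };

        printf("collapse onto node %d\n",
               majority_node(nids, (int)(sizeof(nids) / sizeof(nids[0]))));
        return 0;
}

The commit deliberately keeps the cheap first-page choice instead of a scan like this.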
@@ -1965,7 +1975,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
         pte_unmap_unlock(pte, ptl);
         if (ret)
                 /* collapse_huge_page will return with the mmap_sem released */
-                collapse_huge_page(mm, address, hpage, vma);
+                collapse_huge_page(mm, address, hpage, vma, node);
 out:
         return ret;
 }
3 changes: 2 additions & 1 deletion trunk/mm/mempolicy.c
@@ -1891,7 +1891,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
                 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
         else
                 page = __alloc_pages_nodemask(gfp, order,
-                        policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
+                        policy_zonelist(gfp, pol, numa_node_id()),
+                        policy_nodemask(gfp, pol));
         put_mems_allowed();
         return page;
 }
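
The mempolicy.c hunk is the same idea one layer down: alloc_pages_current() now tells policy_zonelist() explicitly which node counts as local rather than leaving the lookup to the callee. A simplified, self-contained model of that dispatch, with the types and policy modes invented for illustration (the kernel's real policy_zonelist() handles more cases):

#include <stdio.h>

enum mode { LOCAL, PREFERRED };

struct mempolicy {
        enum mode mode;
        int preferred_node;
};

/* Which node's zonelist should the allocation start from? */
static int policy_start_node(const struct mempolicy *pol, int nd)
{
        if (pol->mode == PREFERRED)
                return pol->preferred_node;
        return nd;      /* local policy: honor the caller-supplied node */
}

int main(void)
{
        struct mempolicy local = { LOCAL, -1 };
        struct mempolicy pref = { PREFERRED, 3 };

        printf("local policy, caller on node 1 -> node %d\n",
               policy_start_node(&local, 1));
        printf("preferred(3), caller on node 1 -> node %d\n",
               policy_start_node(&pref, 1));
        return 0;
}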
