From 5704c8ef06470001216e94ec697c1b407c344f7c Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Mon, 29 Apr 2013 15:07:56 -0700
Subject: [PATCH]

--- yaml ---
r: 365418
b: refs/heads/master
c: 8e2cdbcb86b0abefc3d07922c48edb01fece3c56
h: refs/heads/master
v: v3
---
 [refs]                      |  2 +-
 trunk/arch/x86/mm/init_64.c | 46 +++++++++++++++++++++----------------
 2 files changed, 27 insertions(+), 21 deletions(-)

diff --git a/[refs] b/[refs]
index 58a1b3499831..c78bb6a48571 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e8216da5c719c3bfec12779b6faf456009f01c44
+refs/heads/master: 8e2cdbcb86b0abefc3d07922c48edb01fece3c56
diff --git a/trunk/arch/x86/mm/init_64.c b/trunk/arch/x86/mm/init_64.c
index 9f6347c468b0..71ff55a1b287 100644
--- a/trunk/arch/x86/mm/init_64.c
+++ b/trunk/arch/x86/mm/init_64.c
@@ -1303,31 +1303,37 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			pte_t entry;
 			void *p;
 
 			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
-			if (!p)
-				return -ENOMEM;
-
-			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-					PAGE_KERNEL_LARGE);
-			set_pmd(pmd, __pmd(pte_val(entry)));
-
-			/* check to see if we have contiguous blocks */
-			if (p_end != p || node_start != node) {
-				if (p_start)
-					printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
-					       addr_start, addr_end-1, p_start, p_end-1, node_start);
-				addr_start = addr;
-				node_start = node;
-				p_start = p;
-			}
+			if (p) {
+				pte_t entry;
+
+				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+						PAGE_KERNEL_LARGE);
+				set_pmd(pmd, __pmd(pte_val(entry)));
+
+				/* check to see if we have contiguous blocks */
+				if (p_end != p || node_start != node) {
+					if (p_start)
+						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+						       addr_start, addr_end-1, p_start, p_end-1, node_start);
+					addr_start = addr;
+					node_start = node;
+					p_start = p;
+				}
 
-			addr_end = addr + PMD_SIZE;
-			p_end = p + PMD_SIZE;
-		} else
+				addr_end = addr + PMD_SIZE;
+				p_end = p + PMD_SIZE;
+				continue;
+			}
+		} else if (pmd_large(*pmd)) {
 			vmemmap_verify((pte_t *)pmd, node, addr, next);
+			continue;
+		}
+		pr_warn_once("vmemmap: falling back to regular page backing\n");
+		if (vmemmap_populate_basepages(addr, next, node))
+			return -ENOMEM;
 	}
 	return 0;
 }