diff --git a/[refs] b/[refs]
index fc9deeec1696..18765fa9de9c 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: afcb065450913745027169d906b9afc8294f7007
+refs/heads/master: c555e520ef794a94dc36a8ded93ece6369ff7ca0
diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c
index cf81049e1e51..213664c9cdca 100644
--- a/trunk/arch/powerpc/mm/numa.c
+++ b/trunk/arch/powerpc/mm/numa.c
@@ -840,8 +840,16 @@ static void __init *careful_allocation(int nid, unsigned long size,
 		size, nid);
 
 	/*
-	 * If the memory came from a previously allocated node, we must
-	 * retry with the bootmem allocator.
+	 * We initialize the nodes in numeric order: 0, 1, 2...
+	 * and hand over control from the LMB allocator to the
+	 * bootmem allocator.  If this function is called for
+	 * node 5, then we know that all nodes <5 are using the
+	 * bootmem allocator instead of the LMB allocator.
+	 *
+	 * So, check the nid from which this allocation came
+	 * and double check to see if we need to use bootmem
+	 * instead of the LMB.  We don't free the LMB memory
+	 * since it would be useless.
 	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
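
For context, a minimal standalone sketch of the allocator-handover check
described in the new comment follows. The lmb_alloc_on_or_below() and
bootmem_alloc() stubs and the toy pfn-to-nid mapping are hypothetical
stand-ins for illustration only, and the hunk above is truncated before
the retry body, so only the new_nid comparison mirrors the kernel code.

#include <stdio.h>

#define PAGE_SHIFT 12

/* Hypothetical stand-in: recover the owning node from a pfn.
 * Toy layout: 256 pages per node, laid out consecutively. */
static int early_pfn_to_nid(unsigned long pfn)
{
	return (int)(pfn >> 8);
}

/* Toy LMB allocator: early nodes' memory may be all that is
 * available, so a request for node 5 can land on node 2. */
static unsigned long lmb_alloc_on_or_below(int nid)
{
	int got = nid > 2 ? 2 : nid;
	return (unsigned long)got << (8 + PAGE_SHIFT);
}

/* Toy bootmem allocator for nodes already handed over. */
static unsigned long bootmem_alloc(int nid)
{
	return ((unsigned long)nid << (8 + PAGE_SHIFT)) | 0x1000;
}

static unsigned long careful_allocation(int nid)
{
	unsigned long ret = lmb_alloc_on_or_below(nid);
	int new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);

	/*
	 * Nodes are initialized in numeric order, so every node
	 * below 'nid' has already switched from LMB to bootmem.
	 * If the LMB handed back memory on such a node, retry the
	 * allocation with that node's bootmem allocator; the LMB
	 * block is left alone since freeing it would be useless.
	 */
	if (new_nid < nid)
		ret = bootmem_alloc(new_nid);
	return ret;
}

int main(void)
{
	printf("node 5 allocation at %#lx\n", careful_allocation(5));
	return 0;
}

Running the sketch, the node-5 request is detected as having landed on
node 2 (already handed over to bootmem), so it retries on that node's
bootmem allocator rather than freeing the LMB block.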