From ec1b5828d60dd247919d4efaad9d7990f46abbb6 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Sat, 5 Nov 2005 17:25:53 +0100
Subject: [PATCH]

--- yaml ---
r: 14183
b: refs/heads/master
c: e18c6874a505958d153a11f9d6947971c349008a
h: refs/heads/master
i:
  14181: ecdf6db0dcdc0116947826a92e6df866388fb298
  14179: 7919c138da1ffa9681558eec0e1aa1ff51ae916a
  14175: 157a0715320999123b2fb6c319d97f84c2d884dc
v: v3
---

 [refs]                      |  2 +-
 trunk/arch/x86_64/mm/init.c | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 918cff9dd319..a208bd5f75d0 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b0d41693217b3bb5b837940dc7465e82a9d49476
+refs/heads/master: e18c6874a505958d153a11f9d6947971c349008a
diff --git a/trunk/arch/x86_64/mm/init.c b/trunk/arch/x86_64/mm/init.c
index a1ad4cc423a7..2b1d6c382396 100644
--- a/trunk/arch/x86_64/mm/init.c
+++ b/trunk/arch/x86_64/mm/init.c
@@ -47,6 +47,8 @@ extern int swiotlb;
 
 extern char _stext[];
 
+static unsigned long dma_reserve __initdata;
+
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 /*
@@ -354,6 +356,21 @@ size_zones(unsigned long *z, unsigned long *h,
 		w += z[i];
 		h[i] = e820_hole_size(s, w);
 	}
+
+	/* Add the space needed for mem_map to the holes too. */
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
+
+	/* The 16MB DMA zone has the kernel and other misc mappings.
+	   Account them too */
+	if (h[ZONE_DMA]) {
+		h[ZONE_DMA] += dma_reserve;
+		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
+			printk(KERN_WARNING
+				"Kernel too large and filling up ZONE_DMA?\n");
+			h[ZONE_DMA] = z[ZONE_DMA];
+		}
+	}
 }
 
 #ifndef CONFIG_NUMA
@@ -510,6 +527,8 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 #else
 	reserve_bootmem(phys, len);
 #endif
+	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
+		dma_reserve += len / PAGE_SIZE;
 }
 
 int kern_addr_valid(unsigned long addr)
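
To make the arithmetic in the size_zones() hunk easier to follow, the standalone
userspace sketch below redoes the same hole accounting. The zone layout, the
64-byte struct page size, the page size, and the 512-page dma_reserve value are
illustrative assumptions, not the kernel's actual definitions.

/*
 * Standalone sketch of the zone hole accounting added in size_zones()
 * above.  All constants and example values here are assumptions chosen
 * for illustration; they are not the kernel's definitions.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE   4096UL  /* assumed page size in bytes */
#define SKETCH_STRUCT_PAGE 64UL    /* assumed sizeof(struct page) */
#define SKETCH_NR_ZONES    3
#define SKETCH_ZONE_DMA    0

int main(void)
{
	/* Zone sizes and pre-existing e820 holes, in pages (made-up values). */
	unsigned long z[SKETCH_NR_ZONES] = { 4096, 221184, 786432 };
	unsigned long h[SKETCH_NR_ZONES] = { 256, 1024, 0 };
	unsigned long dma_reserve = 512;  /* pages reserved below 16MB */
	int i;

	/* mem_map itself consumes memory, so charge it to each zone's holes. */
	for (i = 0; i < SKETCH_NR_ZONES; i++)
		h[i] += (z[i] * SKETCH_STRUCT_PAGE) / SKETCH_PAGE_SIZE;

	/* The 16MB DMA zone also carries the kernel and bootmem reservations,
	   clamped so the hole count never exceeds the zone size. */
	if (h[SKETCH_ZONE_DMA]) {
		h[SKETCH_ZONE_DMA] += dma_reserve;
		if (h[SKETCH_ZONE_DMA] >= z[SKETCH_ZONE_DMA]) {
			printf("Kernel too large and filling up ZONE_DMA?\n");
			h[SKETCH_ZONE_DMA] = z[SKETCH_ZONE_DMA];
		}
	}

	for (i = 0; i < SKETCH_NR_ZONES; i++)
		printf("zone %d: %lu pages, %lu counted as holes\n",
		       i, z[i], h[i]);
	return 0;
}

With these example numbers the DMA zone ends up with 832 of its 4096 pages
counted as holes (256 from e820, 64 for mem_map, 512 from dma_reserve),
mirroring how reserve_bootmem_generic() feeds every reservation below
MAX_DMA_PFN into dma_reserve.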