Skip to content

Commit

Permalink
Merge branch 'x86/bootmem' into x86/mm
Browse files Browse the repository at this point in the history
Merge reason: the topic is ready - consolidate it into the more generic x86/mm tree
              and prevent conflicts.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
  • Loading branch information
Ingo Molnar committed Feb 16, 2011
2 parents 9a6d44b + d2137d5 commit 02ac81a
Show file tree
Hide file tree
Showing 9 changed files with 178 additions and 109 deletions.
8 changes: 8 additions & 0 deletions arch/x86/include/asm/page_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
#define _ASM_X86_PAGE_DEFS_H

#include <linux/const.h>
#include <linux/types.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
Expand Down Expand Up @@ -45,9 +46,16 @@ extern int devmem_is_allowed(unsigned long pagenr);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;

static inline phys_addr_t get_max_mapped(void)
{
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}

extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);

void init_memory_mapping_high(void);

extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn,
int acpi, int k8);
extern void free_initmem(void);
Expand Down
33 changes: 16 additions & 17 deletions arch/x86/kernel/aperture_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
Expand Down Expand Up @@ -57,7 +57,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
static u32 __init allocate_aperture(void)
{
u32 aper_size;
void *p;
unsigned long addr;

/* aper_size should <= 1G */
if (fallback_aper_order > 5)
Expand All @@ -83,27 +83,26 @@ static u32 __init allocate_aperture(void)
* so don't use 512M below as gart iommu, leave the space for kernel
* code for safe
*/
p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20);
if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) {
printk(KERN_ERR
"Cannot allocate aperture memory hole (%lx,%uK)\n",
addr, aper_size>>10);
return 0;
}
memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
/*
* Kmemleak should not scan this block as it may not be mapped via the
* kernel direct mapping.
*/
kmemleak_ignore(p);
if (!p || __pa(p)+aper_size > 0xffffffff) {
printk(KERN_ERR
"Cannot allocate aperture memory hole (%p,%uK)\n",
p, aper_size>>10);
if (p)
free_bootmem(__pa(p), aper_size);
return 0;
}
kmemleak_ignore(phys_to_virt(addr));
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, __pa(p));
insert_aperture_resource((u32)__pa(p), aper_size);
register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
(u32)__pa(p+aper_size) >> PAGE_SHIFT);
aper_size >> 10, addr);
insert_aperture_resource((u32)addr, aper_size);
register_nosave_region(addr >> PAGE_SHIFT,
(addr+aper_size) >> PAGE_SHIFT);

return (u32)__pa(p);
return (u32)addr;
}


Expand Down
41 changes: 24 additions & 17 deletions arch/x86/kernel/setup.c
Original file line number Diff line number Diff line change
Expand Up @@ -293,10 +293,32 @@ static void __init init_gbpages(void)
else
direct_gbpages = 0;
}

/*
 * Unmap the leftover 2M kernel-text mappings between _brk_end and _end.
 * Called from reserve_brk() once _brk_start is cleared and the brk area
 * can no longer grow.
 */
static void __init cleanup_highmap_brk_end(void)
{
pud_t *pud;
pmd_t *pmd;

/* NOTE(review): reading CR4 looks unrelated to the unmapping below —
 * confirm this assignment belongs in this function. */
mmu_cr4_features = read_cr4();

/*
* _brk_end cannot change anymore, but it and _end may be
* located on different 2M pages. cleanup_highmap(), however,
* can only consider _end when it runs, so destroy any
* mappings beyond _brk_end here.
*/
pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
pmd = pmd_offset(pud, _brk_end - 1);
/* Clear every 2M PMD entry strictly after the one covering _brk_end-1,
 * up to and including the one covering _end-1. */
while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
pmd_clear(pmd);
}
#else
/* No-op stub for configurations where GB-page setup is compiled out —
 * presumably !CONFIG_X86_64; confirm against the matching #ifdef above. */
static inline void init_gbpages(void)
{
}
/* No-op stub: the real high-map cleanup only exists in the 64-bit build —
 * presumably gated on CONFIG_X86_64; confirm against the #ifdef above. */
static inline void cleanup_highmap_brk_end(void)
{
}
#endif

static void __init reserve_brk(void)
Expand All @@ -307,6 +329,8 @@ static void __init reserve_brk(void)
/* Mark brk area as locked down and no longer taking any
new allocations */
_brk_start = 0;

cleanup_highmap_brk_end();
}

#ifdef CONFIG_BLK_DEV_INITRD
Expand Down Expand Up @@ -680,15 +704,6 @@ static int __init parse_reservelow(char *p)

early_param("reservelow", parse_reservelow);

/*
 * Highest physical address covered by the direct mapping, computed
 * from the max_pfn_mapped page-frame counter.
 */
static u64 __init get_max_mapped(void)
{
	return (u64)max_pfn_mapped << PAGE_SHIFT;
}

/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
Expand Down Expand Up @@ -950,14 +965,6 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;

#ifdef CONFIG_X86_64
if (max_pfn > max_low_pfn) {
max_pfn_mapped = init_memory_mapping(1UL<<32,
max_pfn<<PAGE_SHIFT);
/* can we preserve max_low_pfn? */
max_low_pfn = max_pfn;
}
#endif
memblock.current_limit = get_max_mapped();

/*
Expand Down
8 changes: 5 additions & 3 deletions arch/x86/mm/amdtopology_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -278,12 +278,14 @@ int __init amd_scan_nodes(void)
apicid_base = boot_cpu_physical_apicid;
}

for_each_node_mask(i, node_possible_map) {
int j;

for_each_node_mask(i, node_possible_map)
memblock_x86_register_active_regions(i,
nodes[i].start >> PAGE_SHIFT,
nodes[i].end >> PAGE_SHIFT);
init_memory_mapping_high();
for_each_node_mask(i, node_possible_map) {
int j;

for (j = apicid_base; j < cores + apicid_base; j++)
apicid_to_node[(i << bits) + j] = i;
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
Expand Down
36 changes: 4 additions & 32 deletions arch/x86/mm/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ int direct_gbpages
static void __init find_early_table_space(unsigned long end, int use_pse,
int use_gbpages)
{
unsigned long puds, pmds, ptes, tables, start;
unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
phys_addr_t base;

puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
Expand Down Expand Up @@ -65,20 +65,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
#ifdef CONFIG_X86_32
/* for fixmap */
tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif

/*
* RED-PEN putting page tables only on node 0 could
* cause a hotspot and fill up ZONE_DMA. The page tables
* need roughly 0.5KB per GB.
*/
#ifdef CONFIG_X86_32
start = 0x7000;
#else
start = 0x8000;
good_end = max_pfn_mapped << PAGE_SHIFT;
#endif
base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT,
tables, PAGE_SIZE);

base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
if (base == MEMBLOCK_ERROR)
panic("Cannot find space for the kernel page tables");

Expand Down Expand Up @@ -279,25 +270,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
load_cr3(swapper_pg_dir);
#endif

#ifdef CONFIG_X86_64
if (!after_bootmem && !start) {
pud_t *pud;
pmd_t *pmd;

mmu_cr4_features = read_cr4();

/*
* _brk_end cannot change anymore, but it and _end may be
* located on different 2M pages. cleanup_highmap(), however,
* can only consider _end when it runs, so destroy any
* mappings beyond _brk_end here.
*/
pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
pmd = pmd_offset(pud, _brk_end - 1);
while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
pmd_clear(pmd);
}
#endif
__flush_tlb_all();

if (!after_bootmem && e820_table_end > e820_table_start)
Expand Down
Loading

0 comments on commit 02ac81a

Please sign in to comment.