x86, mm: only call early_ioremap_page_table_range_init() once
On 32-bit, before the patchset that sets up page tables only for RAM,
early_ioremap_page_table_range_init() was called only once.

Now it is called during every init_memory_mapping() if there are holes
below max_low_pfn.

It should be called only once, after all ranges below max_low_pfn have
been mapped, just as before.

That also avoids the risk of running out of pgt_buf space in the BRK
area.

page_table_range_init() needs to be updated to count the pages for the
kmap page tables first, and to use the newly added alloc_low_pages() to
obtain those pages in sequence. That conforms to the requirement that
pages be handed out in low-to-high order.
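
To make that concrete, here is a minimal user-space sketch of the same
two-pass pattern: count first, batch-allocate once, then hand the pages
out through a sequential cursor. This is illustrative plain C, not
kernel code; count_pages_needed() and malloc() merely stand in for
page_table_range_init_count() and alloc_low_pages().

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Pass 1: decide up front how many pages the range [start, end) needs. */
static unsigned long count_pages_needed(unsigned long start, unsigned long end)
{
        return (end - start + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
        unsigned long start = 0, end = 5 * PAGE_SIZE + 123;
        unsigned long count = count_pages_needed(start, end);
        unsigned long i;
        char *buf, *adr;

        /* One batch allocation instead of one allocation per page. */
        buf = count ? malloc(count * PAGE_SIZE) : NULL;
        if (count && !buf)
                return 1;

        /* Pass 2: each consumer takes the current cursor and advances it
         * by PAGE_SIZE, so addresses are handed out strictly low-to-high,
         * mirroring how page_table_kmap_check() advances *adr below. */
        adr = buf;
        for (i = 0; i < count; i++) {
                char *page = adr;
                adr += PAGE_SIZE;
                printf("page %lu at %p\n", i, (void *)page);
        }

        free(buf);
        return 0;
}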

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-30-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Yinghai Lu authored and H. Peter Anvin committed Nov 17, 2012
1 parent ddd3509 commit 719272c
Showing 2 changed files with 46 additions and 14 deletions.
13 changes: 5 additions & 8 deletions arch/x86/mm/init.c
@@ -343,14 +343,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
                 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
                                                    mr[i].page_size_mask);
 
-#ifdef CONFIG_X86_32
-        early_ioremap_page_table_range_init();
-
-        load_cr3(swapper_pg_dir);
-#endif
-
-        __flush_tlb_all();
-
         add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
 
         return ret >> PAGE_SHIFT;
@@ -447,7 +439,12 @@ void __init init_mem_mapping(void)
                 /* can we preseve max_low_pfn ?*/
                 max_low_pfn = max_pfn;
         }
+#else
+        early_ioremap_page_table_range_init();
 #endif
+
+        load_cr3(swapper_pg_dir);
+        __flush_tlb_all();
 
         early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
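
For orientation, the tail of init_mem_mapping() reads as follows once
the hunk above is applied. The two opening lines (the #ifdef
CONFIG_X86_64 and the if) sit above the hunk context and are reproduced
from the surrounding file, and the comments are added here for
explanation; treat both as assumptions rather than part of the diff.

#ifdef CONFIG_X86_64
        if (max_pfn > max_low_pfn) {
                /* can we preseve max_low_pfn ?*/
                max_low_pfn = max_pfn;
        }
#else
        /* 32-bit: the kmap/fixmap page tables are now set up exactly
         * once, here, after all low ranges have been mapped. */
        early_ioremap_page_table_range_init();
#endif

        /* Done once for the final page table, rather than in every
         * init_memory_mapping() call as before this commit. */
        load_cr3(swapper_pg_dir);
        __flush_tlb_all();

        early_memtest(0, max_pfn_mapped << PAGE_SHIFT);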

47 changes: 41 additions & 6 deletions arch/x86/mm/init_32.c
@@ -135,8 +135,39 @@ pte_t * __init populate_extra_pte(unsigned long vaddr)
         return one_page_table_init(pmd) + pte_idx;
 }
 
+static unsigned long __init
+page_table_range_init_count(unsigned long start, unsigned long end)
+{
+        unsigned long count = 0;
+#ifdef CONFIG_HIGHMEM
+        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+        int pgd_idx, pmd_idx;
+        unsigned long vaddr;
+
+        if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
+                return 0;
+
+        vaddr = start;
+        pgd_idx = pgd_index(vaddr);
+
+        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
+                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+                                                        pmd_idx++) {
+                        if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
+                            (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
+                                count++;
+                        vaddr += PMD_SIZE;
+                }
+                pmd_idx = 0;
+        }
+#endif
+        return count;
+}
+
 static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
-                                           unsigned long vaddr, pte_t *lastpte)
+                                           unsigned long vaddr, pte_t *lastpte,
+                                           void **adr)
 {
 #ifdef CONFIG_HIGHMEM
         /*
@@ -150,16 +181,15 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
 
         if (pmd_idx_kmap_begin != pmd_idx_kmap_end
             && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
-            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
-            && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
-                || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
+            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
                 pte_t *newpte;
                 int i;
 
                 BUG_ON(after_bootmem);
-                newpte = alloc_low_page();
+                newpte = *adr;
                 for (i = 0; i < PTRS_PER_PTE; i++)
                         set_pte(newpte + i, pte[i]);
+                *adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);
 
                 paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                 set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
@@ -193,6 +223,11 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
         pgd_t *pgd;
         pmd_t *pmd;
         pte_t *pte = NULL;
+        unsigned long count = page_table_range_init_count(start, end);
+        void *adr = NULL;
+
+        if (count)
+                adr = alloc_low_pages(count);
 
         vaddr = start;
         pgd_idx = pgd_index(vaddr);
@@ -205,7 +240,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
                 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                         pmd++, pmd_idx++) {
                         pte = page_table_kmap_check(one_page_table_init(pmd),
-                                                    pmd, vaddr, pte);
+                                                    pmd, vaddr, pte, &adr);
 
                         vaddr += PMD_SIZE;
                 }
