Commit 02d577e

---
r: 129951
b: refs/heads/master
c: a3c6018
h: refs/heads/master
i:
  129949: d1838e6
  129947: e6122fb
  129943: f808f9b
  129935: f2cbab2
  129919: 5f575e7
v: v3
Jan Beulich authored and Ingo Molnar committed Jan 16, 2009
1 parent b816b24 commit 02d577e
Showing 4 changed files with 46 additions and 30 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 18c07cf530cf4aa8b7551801f68ed40db5ee4e45
+refs/heads/master: a3c6018e565dc07cf3738ace6bbe412f97b1bba8
1 change: 0 additions & 1 deletion trunk/arch/x86/include/asm/io.h
@@ -99,7 +99,6 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
  * A boot-time mapping is currently limited to at most 16 pages.
  */
 extern void early_ioremap_init(void);
-extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
 extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
 extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
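For context, a minimal sketch of how this boot-time API is typically used. The caller is hypothetical, and early_iounmap() is assumed to be the matching unmap declared alongside these prototypes:

/* Hypothetical boot-time caller: map a firmware table while the normal
 * ioremap() machinery is not yet available, read it, then drop the
 * temporary mapping again. */
static void __init read_firmware_table(unsigned long phys, unsigned long len)
{
	void __iomem *p = early_ioremap(phys, len);

	if (p) {
		/* ... parse the table through p ... */
		early_iounmap(p, len);
	}
}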
48 changes: 45 additions & 3 deletions trunk/arch/x86/mm/init_32.c
@@ -138,6 +138,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	return pte_offset_kernel(pmd, 0);
 }
 
+static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					   unsigned long vaddr, pte_t *lastpte)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Something (early fixmap) may already have put a pte
+	 * page here, which causes the page table allocation
+	 * to become nonlinear. Attempt to fix it, and if it
+	 * is still nonlinear then we have to bug.
+	 */
+	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+
+	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
+	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
+	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
+	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
+		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+		pte_t *newpte;
+		int i;
+
+		BUG_ON(after_init_bootmem);
+		newpte = alloc_low_page();
+		for (i = 0; i < PTRS_PER_PTE; i++)
+			set_pte(newpte + i, pte[i]);
+
+		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
+		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
+		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
+		__flush_tlb_all();
+
+		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
+		pte = newpte;
+	}
+	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
+	       && vaddr > fix_to_virt(FIX_KMAP_END)
+	       && lastpte && lastpte + PTRS_PER_PTE != pte);
+#endif
+	return pte;
+}
+
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
@@ -154,6 +195,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 	unsigned long vaddr;
 	pgd_t *pgd;
 	pmd_t *pmd;
+	pte_t *pte = NULL;
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -165,7 +207,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 		pmd = pmd + pmd_index(vaddr);
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
 		     pmd++, pmd_idx++) {
-			one_page_table_init(pmd);
+			pte = page_table_kmap_check(one_page_table_init(pmd),
+						    pmd, vaddr, pte);
 
 			vaddr += PMD_SIZE;
 		}
@@ -508,7 +551,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
 	 * Fixed mappings, only the page table structure has to be
 	 * created - mappings will be set by set_fixmap():
 	 */
-	early_ioremap_clear();
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 	page_table_range_init(vaddr, end, pgd_base);
@@ -801,7 +843,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
 	/* for fixmap */
-	tables += PAGE_SIZE * 2;
+	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could
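The old code reserved a flat two pages for fixmap page tables; the new expression sizes the reservation from the actual number of fixmap slots. A quick worked check of the arithmetic — the slot count is config-dependent, so 1024 here is purely an illustrative assumption, and sizeof(pte_t) is 8 under PAE:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long end_of_fixed_addresses = 1024;	/* illustrative; real value is config-dependent */
	unsigned long pte_size = 8;			/* PAE pte; 4 without PAE */

	/* 1024 slots * 8 bytes = 8192 bytes, which PAGE_ALIGN keeps at two
	 * 4 KiB pages; a larger slot count grows the reservation, where the
	 * old flat "PAGE_SIZE * 2" could fall short. */
	printf("%lu bytes reserved\n",
	       PAGE_ALIGN(end_of_fixed_addresses * pte_size));
	return 0;
}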
25 changes: 0 additions & 25 deletions trunk/arch/x86/mm/ioremap.c
@@ -557,34 +557,9 @@ void __init early_ioremap_init(void)
 	}
 }
 
-void __init early_ioremap_clear(void)
-{
-	pmd_t *pmd;
-
-	if (early_ioremap_debug)
-		printk(KERN_INFO "early_ioremap_clear()\n");
-
-	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-	pmd_clear(pmd);
-	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
-	__flush_tlb_all();
-}
-
 void __init early_ioremap_reset(void)
 {
-	enum fixed_addresses idx;
-	unsigned long addr, phys;
-	pte_t *pte;
-
 	after_paging_init = 1;
-	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-		addr = fix_to_virt(idx);
-		pte = early_ioremap_pte(addr);
-		if (pte_present(*pte)) {
-			phys = pte_val(*pte) & PAGE_MASK;
-			set_fixmap(idx, phys);
-		}
-	}
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
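With the teardown and re-instatement dance gone — consistent with early_ioremap_clear() no longer releasing the bm_pte page — the boot-time fixmap ptes simply stay in place across paging init, and early_ioremap_reset() reduces to a single flag flip. Reconstructed from the deletions above, the function after this patch reads:

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}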
