x86, 64bit: Use a #PF handler to materialize early mappings on demand
Linear mode (CR0.PG = 0) is mutually exclusive with 64-bit mode; all
64-bit code has to use page tables.  This makes it awkward to access
objects outside the static kernel range before we have set up proper,
all-covering page tables.

So far we have dealt with that simply by mapping a fixed amount of
low memory, but that fails in at least two upcoming use cases:

1. We will support loading and running the kernel, struct boot_params,
   ramdisk, command line, etc. above the 4 GiB mark.
2. We need to access the ramdisk early so that the microcode can be
   updated as early as possible.

We could use early_ioremap to access them too, but that would make the
code messy and hard to unify with the 32-bit path.

Hence, install an early #PF handler and use a fixed number of buffer
pages to set up page tables on demand.  If the buffers fill up, we
simply flush them and start over.  These buffers are all in __initdata,
so they do not increase RAM usage at runtime.

Thus, with the help of the #PF handler, we can build the final kernel
mapping from a blank page table, and switch to init_level4_pgt later.
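
A minimal user-space model of that flush-and-restart buffer policy
(illustrative only; kernel names borrowed, the real logic lives in
early_make_pgtable() in the diff below):

	#include <string.h>

	#define EARLY_DYNAMIC_PAGE_TABLES	64
	#define PTRS_PER_PMD			512

	static unsigned long pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
	static unsigned int next_pgt;

	static unsigned long *alloc_early_pgt(void)
	{
		if (next_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			/* Out of buffers: wipe everything and start over.
			 * The kernel also reloads CR3 at this point, so
			 * dropped mappings simply fault again and get
			 * rebuilt on demand. */
			memset(pgts, 0, sizeof(pgts));
			next_pgt = 0;
		}
		return pgts[next_pgt++];
	}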

During the switchover in head_64.S, before the #PF handler is
available, we use three pages to handle the kernel crossing 1G and
512G boundaries with shared pages, by playing games with page aliasing:
the same page is mapped twice in the higher-level tables with
appropriate wraparound.  The kernel region itself will be properly
mapped; other mappings may be spurious.
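
A quick illustration of that wraparound (user-space arithmetic, not the
head_64.S code): with one level-2 page aliased into two consecutive
level-3 slots, indices are effectively taken mod 512, so an address
just past a 1 GiB boundary lands back at entry 0 of the same page:

	#include <stdio.h>

	int main(void)
	{
		/* PMD_SHIFT = 21: each level-2 entry maps a 2 MiB page. */
		unsigned long below = 0x3fe00000UL;	/* 2 MiB below the 1 GiB line */
		unsigned long above = 0x40000000UL;	/* first byte above it */

		printf("%lu\n", (below >> 21) % 512);	/* 511 */
		printf("%lu\n", (above >> 21) % 512);	/* 0 */
		return 0;
	}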

early_make_pgtable() uses kernel high-mapping addresses to access the
pages in which it builds the page tables.
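
The address translation behind that is the usual high-mapping identity;
a one-line sketch (hypothetical helper, not something the patch adds):

	/* Physical address -> kernel high-mapping alias, as the pud_p/pmd_p
	 * expressions in early_make_pgtable() compute it. */
	static inline void *early_highmap(unsigned long physaddr)
	{
		return (void *)(physaddr + __START_KERNEL_map - phys_base);
	}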

-v4: Add phys_base offset to make kexec happy, and add
	init_mapping_kernel()   - Yinghai
-v5: Fix compilation with Xen, and add back ident level3 and level2
     for Xen; also move init_level4_pgt back from BSS to DATA again,
     because we have to clear it anyway.  - Yinghai
-v6: Switch to init_level4_pgt in init_mem_mapping.  - Yinghai
-v7: Remove the unneeded clear_page for init_level4_pgt; it is already
     filled with "fill 512,8,0" in head_64.S.  - Yinghai
-v8: Keep the handler alive until init_mem_mapping, and don't let
     early_trap_init trash the early #PF handler; split
     early_trap_pf_init out and move it down.  - Yinghai
-v9: Make the switchover cover only the kernel space instead of 1G, to
     avoid touching possible memory holes.  - Yinghai
-v11: Change the far jmp back to a far return to initial_code; this is
     needed to fix a failure reported by Konrad on AMD systems.  - Yinghai

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-12-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
H. Peter Anvin authored and committed on Jan 29, 2013
1 parent 4f7b922 commit 8170e6b
Showing 7 changed files with 219 additions and 91 deletions.
4 changes: 4 additions & 0 deletions arch/x86/include/asm/pgtable_64_types.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_PGTABLE_64_DEFS_H
#define _ASM_X86_PGTABLE_64_DEFS_H

#include <asm/sparsemem.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>

@@ -60,4 +62,6 @@ typedef struct { pteval_t pte; } pte_t;
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)

#define EARLY_DYNAMIC_PAGE_TABLES 64

#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
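
For scale (back-of-the-envelope, not from the patch): a single early
fault consumes at most two of these pages, one as a PUD table and one
as a PMD table, and each PMD page filled with 2 MiB entries maps
512 * 2 MiB = 1 GiB:

	#define PTRS_PER_PMD		512
	#define PMD_SIZE		(2UL << 20)	/* 2 MiB large page */
	/* hypothetical name: coverage of one PMD page = 1 GiB */
	#define EARLY_PMD_COVERAGE	(PTRS_PER_PMD * PMD_SIZE)
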
1 change: 1 addition & 0 deletions arch/x86/include/asm/processor.h
@@ -731,6 +731,7 @@ extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
extern void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;
81 changes: 74 additions & 7 deletions arch/x86/kernel/head64.c
@@ -27,11 +27,73 @@
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>

/*
 * Manage page tables very early on.
 */
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt = 2;

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	unsigned long i;

	for (i = 0; i < PTRS_PER_PGD-1; i++)
		early_level4_pgt[i].pgd = 0;

	next_early_pgt = 0;

	write_cr3(__pa(early_level4_pgt));
}

/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	unsigned long i;
	pgdval_t pgd, *pgd_p;
	pudval_t *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3() != __pa(early_level4_pgt))
		return -1;

	i = (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	pgd_p = &early_level4_pgt[i].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (pgd && next_early_pgt < EARLY_DYNAMIC_PAGE_TABLES) {
		pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	} else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES-1)
			reset_early_page_tables();

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PUD; i++)
			pud_p[i] = 0;

		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	i = (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
	pud_p += i;

	pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
	pmd = (physaddr & PUD_MASK) + (__PAGE_KERNEL_LARGE & ~_PAGE_GLOBAL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_p[i] = pmd;
		pmd += PMD_SIZE;
	}

	*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;

	return 0;
}
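
To make the index arithmetic above concrete, a user-space rerun for a
hypothetical fault at __PAGE_OFFSET + 5 GiB (constants are the
v3.8-era values; not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_offset = 0xffff880000000000UL; /* __PAGE_OFFSET */
		unsigned long address = page_offset + (5UL << 30); /* phys 5 GiB */

		/* PGDIR_SHIFT = 39, PUD_SHIFT = 30 with 4-level paging */
		printf("pgd index: %lu\n", (address >> 39) & 511);	/* 272 */
		printf("pud index: %lu\n", (address >> 30) & 511);	/* 5 */

		/* The handler then fills one PMD page with 512 2 MiB
		 * entries covering physical 5 GiB .. 6 GiB. */
		return 0;
	}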

/* Don't add a printk in there. printk relies on the PDA which is not initialized
@@ -72,12 +134,13 @@ void __init x86_64_start_kernel(char * real_mode_data)
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	/* clear bss before set_intr_gate with early_idt_handler */
	clear_bss();

	/* XXX - this is wrong... we need to build page tables from scratch */
	max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
@@ -94,6 +157,10 @@ void __init x86_64_start_kernel(char * real_mode_data)
	if (console_loglevel == 10)
		early_printk("Kernel alive\n");

	clear_page(init_level4_pgt);
	/* set init_level4_pgt kernel high mapping */
	init_level4_pgt[511] = early_level4_pgt[511];

	x86_64_start_reservations(real_mode_data);
}
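
As a sanity check on that last hunk (user-space arithmetic, not part of
the patch), the kernel high mapping at __START_KERNEL_map does land in
PGD slot 511, which is why copying that one entry preserves the kernel
mapping across the later switch to init_level4_pgt:

	#include <stdio.h>

	int main(void)
	{
		unsigned long map = 0xffffffff80000000UL; /* __START_KERNEL_map */

		/* PGDIR_SHIFT = 39: each PGD slot spans 512 GiB. */
		printf("pgd index: %lu\n", (map >> 39) & 511);	/* 511 */
		return 0;
	}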

