Merge branch 'upstream-x86-selftests' into WIP.x86/pti.base
Conflicts:
	arch/x86/kernel/cpu/Makefile

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar committed Dec 17, 2017
2 parents 0fd2e9c + fec8f5a commit 650400b
Showing 19 changed files with 613 additions and 522 deletions.
2 changes: 1 addition & 1 deletion Documentation/x86/x86_64/mm.txt
@@ -34,7 +34,7 @@ ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space
ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
... unused hole ...
ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB)
ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
... unused hole ...
ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
... unused hole ...
1 change: 0 additions & 1 deletion arch/x86/Kconfig
@@ -303,7 +303,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
default 0xdff8000000000000 if X86_5LEVEL
default 0xdffffc0000000000

config HAVE_INTEL_TXT
4 changes: 2 additions & 2 deletions arch/x86/entry/syscalls/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
out := $(obj)/../../include/generated/asm
uapi := $(obj)/../../include/generated/uapi/asm
out := arch/$(SRCARCH)/include/generated/asm
uapi := arch/$(SRCARCH)/include/generated/uapi/asm

# Create output directory if not already present
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
545 changes: 272 additions & 273 deletions arch/x86/include/asm/cpufeatures.h

Large diffs are not rendered by default.

3 changes: 1 addition & 2 deletions arch/x86/include/asm/pgtable_types.h
@@ -200,10 +200,9 @@ enum page_cache_mode {

#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
_PAGE_DIRTY | _PAGE_ENC)
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)

#define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _PAGE_ENC)
#define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _PAGE_ENC)
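The rewritten _PAGE_TABLE is intended to expand to exactly the same mask as the two deleted lines. A minimal user-space sketch of that equivalence, assuming the standard x86 PTE bit positions and treating _PAGE_ENC as zero (SME off); illustrative code, not the kernel headers:

#include <assert.h>

/* Standard x86 PTE bits (illustrative values, not the kernel headers). */
#define _PAGE_PRESENT	(1ULL << 0)
#define _PAGE_RW	(1ULL << 1)
#define _PAGE_USER	(1ULL << 2)
#define _PAGE_ACCESSED	(1ULL << 5)
#define _PAGE_DIRTY	(1ULL << 6)
#define _PAGE_ENC	0ULL	/* stands in for sme_me_mask; 0 when SME is off */

#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
			 _PAGE_DIRTY | _PAGE_ENC)
#define OLD_PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC)
#define NEW_PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
	/* The rewrite is purely a simplification: same bits either way. */
	assert(OLD_PAGE_TABLE == NEW_PAGE_TABLE);
	return 0;
}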
3 changes: 2 additions & 1 deletion arch/x86/kernel/Makefile
@@ -25,7 +25,8 @@ endif
KASAN_SANITIZE_head$(BITS).o := n
KASAN_SANITIZE_dumpstack.o := n
KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n

OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
26 changes: 11 additions & 15 deletions arch/x86/kernel/cpu/cpuid-deps.c
@@ -62,23 +62,19 @@ const static struct cpuid_dep cpuid_deps[] = {
{}
};

static inline void __clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit)
{
clear_bit32(bit, c->x86_capability);
}

static inline void __setup_clear_cpu_cap(unsigned int bit)
{
clear_cpu_cap(&boot_cpu_data, bit);
set_bit32(bit, cpu_caps_cleared);
}

static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature)
{
if (!c)
__setup_clear_cpu_cap(feature);
else
__clear_cpu_cap(c, feature);
/*
* Note: This could use the non atomic __*_bit() variants, but the
* rest of the cpufeature code uses atomics as well, so keep it for
* consistency. Cleanup all of it separately.
*/
if (!c) {
clear_cpu_cap(&boot_cpu_data, feature);
set_bit(feature, (unsigned long *)cpu_caps_cleared);
} else {
clear_bit(feature, (unsigned long *)c->x86_capability);
}
}

/* Take the capabilities and the BUG bits into account */
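The dropped helpers were thin wrappers around the temporary clear_bit32()/set_bit32() functions; the new clear_feature() calls the generic atomic set_bit()/clear_bit() directly, casting the u32 capability arrays to unsigned long *. A stand-alone, non-atomic sketch of the word/bit arithmetic behind those bitops (illustrative only; the kernel variants are atomic and arch-optimized):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

/* Non-atomic illustrations of set_bit()/clear_bit() on a bitmap. */
static void set_bit_simple(unsigned int bit, unsigned long *map)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static void clear_bit_simple(unsigned int bit, unsigned long *map)
{
	map[bit / BITS_PER_LONG] &= ~(1UL << (bit % BITS_PER_LONG));
}

int main(void)
{
	unsigned long caps[4] = { 0 };	/* stand-in for a capability bitmap */

	set_bit_simple(100, caps);	/* hypothetical feature bit 100 */
	printf("word 1 = %#lx\n", caps[1]);	/* bit 36 of word 1 on 64-bit */
	clear_bit_simple(100, caps);
	printf("word 1 = %#lx\n", caps[1]);
	return 0;
}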
11 changes: 6 additions & 5 deletions arch/x86/kernel/head_64.S
@@ -38,11 +38,12 @@
*
*/

#define p4d_index(x) (((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
PGD_START_KERNEL = pgd_index(__START_KERNEL_map)
#endif
L3_START_KERNEL = pud_index(__START_KERNEL_map)

.text
@@ -362,10 +363,7 @@ NEXT_PAGE(early_dynamic_pgts)

.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_top_pgt)
.fill 512,8,0
#else
#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
NEXT_PAGE(init_top_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.org init_top_pgt + PGD_PAGE_OFFSET*8, 0
@@ -382,6 +380,9 @@ NEXT_PAGE(level2_ident_pgt)
* Don't set NX because code runs from these pages.
*/
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#else
NEXT_PAGE(init_top_pgt)
.fill 512,8,0
#endif

#ifdef CONFIG_X86_5LEVEL
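PGD_PAGE_OFFSET, PGD_START_KERNEL and L3_START_KERNEL are assemble-time page-table indices produced by the pgd_index()/pud_index() arithmetic above. A quick user-space check of that arithmetic for 4-level paging, with the base addresses assumed from the x86-64 memory map of this period:

#include <stdio.h>

#define PGDIR_SHIFT	39		/* 4-level paging */
#define PUD_SHIFT	30
#define PTRS_PER_PGD	512
#define PTRS_PER_PUD	512

#define pgd_index(x)	(((x) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

int main(void)
{
	unsigned long long start_kernel_map = 0xffffffff80000000ULL;	/* __START_KERNEL_map */
	unsigned long long page_offset_base = 0xffff880000000000ULL;	/* 4-level __PAGE_OFFSET_BASE */

	/* Kernel text lives in the last PGD entry and second-to-last PUD entry. */
	printf("pgd_index(__START_KERNEL_map) = %llu\n", pgd_index(start_kernel_map));	/* 511 */
	printf("pud_index(__START_KERNEL_map) = %llu\n", pud_index(start_kernel_map));	/* 510 */
	printf("pgd_index(__PAGE_OFFSET_BASE) = %llu\n", pgd_index(page_offset_base));	/* 272 */
	return 0;
}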
10 changes: 5 additions & 5 deletions arch/x86/mm/init_64.c
@@ -1426,16 +1426,16 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
struct page *start_page, unsigned long size)
struct page *start_page, unsigned long nr_pages)
{
unsigned long addr = (unsigned long)start_page;
unsigned long end = (unsigned long)(start_page + size);
unsigned long end = (unsigned long)(start_page + nr_pages);
unsigned long next;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
unsigned int nr_pages;
unsigned int nr_pmd_pages;
struct page *page;

for (; addr < end; addr = next) {
@@ -1482,9 +1482,9 @@ void register_page_bootmem_memmap(unsigned long section_nr,
if (pmd_none(*pmd))
continue;

nr_pages = 1 << (get_order(PMD_SIZE));
nr_pmd_pages = 1 << get_order(PMD_SIZE);
page = pmd_page(*pmd);
while (nr_pages--)
while (nr_pmd_pages--)
get_page_bootmem(section_nr, page++,
SECTION_INFO);
}
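The renamed nr_pmd_pages counts the struct pages backing one PMD-sized mapping: 1 << get_order(PMD_SIZE) is 512 with 4 KiB pages and 2 MiB PMDs. A small sketch of that calculation, with get_order() re-implemented here purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT	12		/* 4 KiB pages */
#define PMD_SIZE	(1UL << 21)	/* 2 MiB on x86-64 with 4 KiB pages */

/* Simplified get_order(): smallest order such that (PAGE_SIZE << order) >= size. */
static int get_order_simple(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long nr_pmd_pages = 1UL << get_order_simple(PMD_SIZE);

	printf("get_order(PMD_SIZE) = %d\n", get_order_simple(PMD_SIZE));	/* 9 */
	printf("nr_pmd_pages = %lu\n", nr_pmd_pages);				/* 512 */
	return 0;
}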
101 changes: 80 additions & 21 deletions arch/x86/mm/kasan_init_64.c
@@ -16,6 +16,8 @@

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

static int __init map_range(struct range *range)
{
unsigned long start;
@@ -31,8 +33,10 @@ static void __init clear_pgds(unsigned long start,
unsigned long end)
{
pgd_t *pgd;
/* See comment in kasan_init() */
unsigned long pgd_end = end & PGDIR_MASK;

for (; start < end; start += PGDIR_SIZE) {
for (; start < pgd_end; start += PGDIR_SIZE) {
pgd = pgd_offset_k(start);
/*
* With folded p4d, pgd_clear() is nop, use p4d_clear()
@@ -43,29 +47,61 @@ static void __init clear_pgds(unsigned long start,
else
pgd_clear(pgd);
}

pgd = pgd_offset_k(start);
for (; start < end; start += P4D_SIZE)
p4d_clear(p4d_offset(pgd, start));
}

static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
unsigned long p4d;

if (!IS_ENABLED(CONFIG_X86_5LEVEL))
return (p4d_t *)pgd;

p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
p4d += __START_KERNEL_map - phys_base;
return (p4d_t *)p4d + p4d_index(addr);
}

static void __init kasan_early_p4d_populate(pgd_t *pgd,
unsigned long addr,
unsigned long end)
{
pgd_t pgd_entry;
p4d_t *p4d, p4d_entry;
unsigned long next;

if (pgd_none(*pgd)) {
pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
set_pgd(pgd, pgd_entry);
}

p4d = early_p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);

if (!p4d_none(*p4d))
continue;

p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
set_p4d(p4d, p4d_entry);
} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

static void __init kasan_map_early_shadow(pgd_t *pgd)
{
int i;
unsigned long start = KASAN_SHADOW_START;
/* See comment in kasan_init() */
unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
unsigned long end = KASAN_SHADOW_END;
unsigned long next;

for (i = pgd_index(start); start < end; i++) {
switch (CONFIG_PGTABLE_LEVELS) {
case 4:
pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
_KERNPG_TABLE);
break;
case 5:
pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
_KERNPG_TABLE);
break;
default:
BUILD_BUG();
}
start += PGDIR_SIZE;
}
pgd += pgd_index(addr);
do {
next = pgd_addr_end(addr, end);
kasan_early_p4d_populate(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_KASAN_INLINE
@@ -102,7 +138,7 @@ void __init kasan_early_init(void)
for (i = 0; i < PTRS_PER_PUD; i++)
kasan_zero_pud[i] = __pud(pud_val);

for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
kasan_zero_p4d[i] = __p4d(p4d_val);

kasan_map_early_shadow(early_top_pgt);
@@ -118,12 +154,35 @@ void __init kasan_init(void)
#endif

memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

/*
* We use the same shadow offset for 4- and 5-level paging to
* facilitate boot-time switching between paging modes.
* As result in 5-level paging mode KASAN_SHADOW_START and
* KASAN_SHADOW_END are not aligned to PGD boundary.
*
* KASAN_SHADOW_START doesn't share PGD with anything else.
* We claim whole PGD entry to make things easier.
*
* KASAN_SHADOW_END lands in the last PGD entry and it collides with
* bunch of things like kernel code, modules, EFI mapping, etc.
* We need to take extra steps to not overwrite them.
*/
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
void *ptr;

ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
}

load_cr3(early_top_pgt);
__flush_tlb_all();

clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
kasan_mem_to_shadow((void *)PAGE_OFFSET));

for (i = 0; i < E820_MAX_ENTRIES; i++) {
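The shadow mapping behind all of this is shadow = (addr >> 3) + KASAN_SHADOW_OFFSET; keeping a single offset of 0xdffffc0000000000 for both 4- and 5-level paging (see the Kconfig hunk above) is what enables boot-time switching between paging modes, at the price of the PGD-unaligned KASAN_SHADOW_START/END handled by the code above. A user-space sketch of the address arithmetic, with scale and offset assumed from the Kconfig default:

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3			/* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET		0xdffffc0000000000ULL	/* single offset for 4- and 5-level */

static unsigned long long kasan_mem_to_shadow(unsigned long long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	/* Start of the 4-level direct mapping (illustrative address). */
	unsigned long long page_offset_4l = 0xffff880000000000ULL;

	printf("shadow(0x%llx) = 0x%llx\n",
	       page_offset_4l, kasan_mem_to_shadow(page_offset_4l));	/* 0xffffed0000000000 */
	return 0;
}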