arm64: head: Move early kernel mapping routines into C code
The asm version of the kernel mapping code works fine for creating a
coarse-grained identity map, but it is not suitable for mapping the kernel
down to its exact boundaries with the right attributes. This is why we
create a preliminary RWX kernel mapping first and then rebuild it from
scratch later on.

So let's reimplement this in C, in a way that will make it unnecessary
to create the kernel page tables yet another time in paging_init().

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20240214122845.2033971-63-ardb+git@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Ard Biesheuvel authored and Catalin Marinas committed Feb 16, 2024
1 parent 82ca151 commit 97a6f43
Showing 15 changed files with 315 additions and 121 deletions.
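The commit message above boils down to replacing an asm page-table builder with a C one that can honour exact segment boundaries and attributes. Below is a minimal, hypothetical sketch of such a builder for a 4 KiB-granule, 4-level (48-bit VA) configuration. All names, the simplified descriptor encodings, the bump allocator, and the assumption that tables are accessible at their physical addresses are illustrative choices, not the kernel's actual pi/map_range.c code.

/*
 * Hypothetical sketch only: install 4 KiB mappings for [pa, pa + size)
 * at 'va' in a 4-level page table.  va, pa and size are assumed to be
 * page aligned; tables are assumed reachable at their physical address.
 */
#include <stdint.h>
#include <string.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTRS_PER_TABLE	512

#define DESC_VALID	0x1UL
#define DESC_TABLE	0x3UL	/* table descriptor at levels 0-2 */
#define DESC_PAGE	0x3UL	/* page descriptor at level 3 */

/* Trivial bump allocator handing out zeroed, page-sized tables. */
static uint64_t *alloc_table(uint64_t **free_page)
{
	uint64_t *tbl = *free_page;

	*free_page += PTRS_PER_TABLE;
	memset(tbl, 0, PAGE_SIZE);
	return tbl;
}

static void sketch_map_range(uint64_t *pgd, uint64_t va, uint64_t pa,
			     uint64_t size, uint64_t prot, uint64_t **free_page)
{
	for (uint64_t off = 0; off < size; off += PAGE_SIZE) {
		uint64_t *tbl = pgd;

		/* Walk levels 0..2, allocating missing table descriptors. */
		for (int shift = 39; shift > PAGE_SHIFT; shift -= 9) {
			unsigned int idx = ((va + off) >> shift) & (PTRS_PER_TABLE - 1);

			if (!(tbl[idx] & DESC_VALID))
				tbl[idx] = (uint64_t)(uintptr_t)alloc_table(free_page) | DESC_TABLE;
			tbl = (uint64_t *)(uintptr_t)(tbl[idx] & ~(PAGE_SIZE - 1));
		}
		/* Level 3: page descriptor carrying the exact attributes. */
		tbl[((va + off) >> PAGE_SHIFT) & (PTRS_PER_TABLE - 1)] =
			((pa + off) & ~(PAGE_SIZE - 1)) | prot | DESC_PAGE;
	}
}

With a builder of this shape, each kernel segment can be mapped with its exact boundaries and prot bits in a single pass, which is what makes the later full rebuild in paging_init() unnecessary.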
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/archrandom.h
@@ -129,6 +129,4 @@ static inline bool __init __early_cpu_has_rndr(void)
return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}

u64 kaslr_early_init(void *fdt);

#endif /* _ASM_ARCHRANDOM_H */
32 changes: 3 additions & 29 deletions arch/arm64/include/asm/scs.h
@@ -33,37 +33,11 @@
#include <asm/cpufeature.h>

#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS
static inline bool should_patch_pac_into_scs(void)
{
u64 reg;

/*
* We only enable the shadow call stack dynamically if we are running
* on a system that does not implement PAC or BTI. PAC and SCS provide
* roughly the same level of protection, and BTI relies on the PACIASP
* instructions serving as landing pads, preventing us from patching
* those instructions into something else.
*/
reg = read_sysreg_s(SYS_ID_AA64ISAR1_EL1);
if (SYS_FIELD_GET(ID_AA64ISAR1_EL1, APA, reg) |
SYS_FIELD_GET(ID_AA64ISAR1_EL1, API, reg))
return false;

reg = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
if (SYS_FIELD_GET(ID_AA64ISAR2_EL1, APA3, reg))
return false;

if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) {
reg = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
if (reg & (0xf << ID_AA64PFR1_EL1_BT_SHIFT))
return false;
}
return true;
}

static inline void dynamic_scs_init(void)
{
if (should_patch_pac_into_scs()) {
extern bool __pi_dynamic_scs_is_enabled;

if (__pi_dynamic_scs_is_enabled) {
pr_info("Enabling dynamic shadow call stack\n");
static_branch_enable(&dynamic_scs_enabled);
}
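The hunk above drops the ID-register probing from dynamic_scs_init() and has it consult a __pi_dynamic_scs_is_enabled flag instead, so the decision can be made once in the early PI code. A plausible PI-side setter, mirroring the checks removed above, might look like the sketch below; it is not the kernel's actual pi/ code and it omits the CONFIG_ARM64_BTI_KERNEL case for brevity.

/* Sketch only: set once by early PI code, seen as __pi_dynamic_scs_is_enabled. */
bool dynamic_scs_is_enabled;

static void sketch_init_dynamic_scs(void)
{
	u64 isar1 = read_sysreg_s(SYS_ID_AA64ISAR1_EL1);
	u64 isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);

	/* Only patch PAC into SCS when no address authentication is implemented. */
	if (SYS_FIELD_GET(ID_AA64ISAR1_EL1, APA, isar1) ||
	    SYS_FIELD_GET(ID_AA64ISAR1_EL1, API, isar1) ||
	    SYS_FIELD_GET(ID_AA64ISAR2_EL1, APA3, isar2))
		return;

	dynamic_scs_is_enabled = true;
}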
52 changes: 1 addition & 51 deletions arch/arm64/kernel/head.S
@@ -81,7 +81,6 @@
* x20 primary_entry() .. __primary_switch() CPU boot mode
* x21 primary_entry() .. start_kernel() FDT pointer passed at boot in x0
* x22 create_idmap() .. start_kernel() ID map VA of the DT blob
* x23 __primary_switch() physical misalignment/KASLR offset
* x25 primary_entry() .. start_kernel() supported VA size
* x28 create_idmap() callee preserved temp register
*/
@@ -408,24 +407,6 @@ SYM_FUNC_START_LOCAL(create_idmap)
0: ret x28
SYM_FUNC_END(create_idmap)

SYM_FUNC_START_LOCAL(create_kernel_mapping)
adrp x0, init_pg_dir
mov_q x5, KIMAGE_VADDR // compile time __va(_text)
#ifdef CONFIG_RELOCATABLE
add x5, x5, x23 // add KASLR displacement
#endif
adrp x6, _end // runtime __pa(_end)
adrp x3, _text // runtime __pa(_text)
sub x6, x6, x3 // _end - _text
add x6, x6, x5 // runtime __va(_end)
mov_q x7, SWAPPER_RW_MMUFLAGS

map_memory x0, x1, x5, x6, x7, x3, (VA_BITS - PGDIR_SHIFT), x10, x11, x12, x13, x14

dsb ishst // sync with page table walker
ret
SYM_FUNC_END(create_kernel_mapping)

/*
* Initialize CPU registers with task-specific and cpu-specific context.
*
@@ -752,44 +733,13 @@ SYM_FUNC_START_LOCAL(__primary_switch)
adrp x2, init_idmap_pg_dir
bl __enable_mmu

// Clear BSS
adrp x0, __bss_start
mov x1, xzr
adrp x2, init_pg_end
sub x2, x2, x0
bl __pi_memset
dsb ishst // Make zero page visible to PTW

adrp x1, early_init_stack
mov sp, x1
mov x29, xzr
mov x0, x20 // pass the full boot status
mov x1, x22 // pass the low FDT mapping
bl __pi_init_feature_override // Parse cpu feature overrides

#ifdef CONFIG_RELOCATABLE
adrp x23, KERNEL_START
and x23, x23, MIN_KIMG_ALIGN - 1
#ifdef CONFIG_RANDOMIZE_BASE
mov x0, x22
bl __pi_kaslr_early_init
bic x0, x0, #SZ_2M - 1
orr x23, x23, x0 // record kernel offset
#endif
#endif
bl create_kernel_mapping
bl __pi_early_map_kernel // Map and relocate the kernel

adrp x1, init_pg_dir
load_ttbr1 x1, x1, x2
#ifdef CONFIG_RELOCATABLE
mov x0, x23
bl __pi_relocate_kernel
#endif
#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS
ldr x0, =__eh_frame_start
ldr x1, =__eh_frame_end
bl __pi_scs_patch_vmlinux
#endif
ldr x8, =__primary_switched
adrp x0, KERNEL_START // __pa(KERNEL_START)
br x8
19 changes: 19 additions & 0 deletions arch/arm64/kernel/image-vars.h
@@ -45,9 +45,28 @@ PROVIDE(__pi_id_aa64pfr1_override = id_aa64pfr1_override);
PROVIDE(__pi_id_aa64smfr0_override = id_aa64smfr0_override);
PROVIDE(__pi_id_aa64zfr0_override = id_aa64zfr0_override);
PROVIDE(__pi_arm64_sw_feature_override = arm64_sw_feature_override);
PROVIDE(__pi_arm64_use_ng_mappings = arm64_use_ng_mappings);
#ifdef CONFIG_CAVIUM_ERRATUM_27456
PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
#endif
PROVIDE(__pi__ctype = _ctype);
PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);

PROVIDE(__pi_init_pg_dir = init_pg_dir);
PROVIDE(__pi_init_pg_end = init_pg_end);

PROVIDE(__pi__text = _text);
PROVIDE(__pi__stext = _stext);
PROVIDE(__pi__etext = _etext);
PROVIDE(__pi___start_rodata = __start_rodata);
PROVIDE(__pi___inittext_begin = __inittext_begin);
PROVIDE(__pi___inittext_end = __inittext_end);
PROVIDE(__pi___initdata_begin = __initdata_begin);
PROVIDE(__pi___initdata_end = __initdata_end);
PROVIDE(__pi__data = _data);
PROVIDE(__pi___bss_start = __bss_start);
PROVIDE(__pi__end = _end);

#ifdef CONFIG_KVM

/*
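The new PROVIDE() aliases exist because symbol references made by the objects under arch/arm64/kernel/pi/ are prefixed with __pi_ at build time, so each linker-script symbol the PI code wants to see needs a __pi_-prefixed alias pointing back at the real definition. A hypothetical illustration of what that buys the C code (sketch only; sketch_kernel_image_size() is made up):

/* Inside a pi/ object, the plain names resolve via the __pi_ aliases above. */
extern char _text[], _end[];

static inline unsigned long sketch_kernel_image_size(void)
{
	return (unsigned long)(_end - _text);
}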
1 change: 1 addition & 0 deletions arch/arm64/kernel/pi/Makefile
@@ -39,6 +39,7 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)

obj-y := idreg-override.pi.o \
map_kernel.pi.o map_range.pi.o \
lib-fdt.pi.o lib-fdt_ro.pi.o
obj-$(CONFIG_RELOCATABLE) += relocate.pi.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_early.pi.o
22 changes: 10 additions & 12 deletions arch/arm64/kernel/pi/idreg-override.c
@@ -308,37 +308,35 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
} while (1);
}

static __init const u8 *get_bootargs_cmdline(const void *fdt)
static __init const u8 *get_bootargs_cmdline(const void *fdt, int node)
{
static char const bootargs[] __initconst = "bootargs";
const u8 *prop;
int node;

node = fdt_path_offset(fdt, "/chosen");
if (node < 0)
return NULL;

prop = fdt_getprop(fdt, node, "bootargs", NULL);
prop = fdt_getprop(fdt, node, bootargs, NULL);
if (!prop)
return NULL;

return strlen(prop) ? prop : NULL;
}

static __init void parse_cmdline(const void *fdt)
static __init void parse_cmdline(const void *fdt, int chosen)
{
const u8 *prop = get_bootargs_cmdline(fdt);
static char const cmdline[] __initconst = CONFIG_CMDLINE;
const u8 *prop = get_bootargs_cmdline(fdt, chosen);

if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
__parse_cmdline(CONFIG_CMDLINE, true);
__parse_cmdline(cmdline, true);

if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
__parse_cmdline(prop, true);
}

/* Keep checkers quiet */
void init_feature_override(u64 boot_status, const void *fdt);

asmlinkage void __init init_feature_override(u64 boot_status, const void *fdt)
void __init init_feature_override(u64 boot_status, const void *fdt,
int chosen)
{
struct arm64_ftr_override *override;
const struct ftr_set_desc *reg;
@@ -354,7 +352,7 @@ asmlinkage void __init init_feature_override(u64 boot_status, const void *fdt)

__boot_status = boot_status;

parse_cmdline(fdt);
parse_cmdline(fdt, chosen);

for (i = 0; i < ARRAY_SIZE(regs); i++) {
reg = prel64_pointer(regs[i].reg);
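The signature change threads a pre-resolved /chosen node offset into init_feature_override() and its helpers, instead of having each helper call fdt_path_offset() on its own. A hypothetical caller (sketch only; sketch_early_setup() is not a kernel function) could look like this:

void init_feature_override(u64 boot_status, const void *fdt, int chosen);
u64 kaslr_early_init(void *fdt, int chosen);

static void sketch_early_setup(u64 boot_status, void *fdt)
{
	int chosen = fdt_path_offset(fdt, "/chosen");	/* resolve once */

	init_feature_override(boot_status, fdt, chosen);
	kaslr_early_init(fdt, chosen);			/* when KASLR is configured */
}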
12 changes: 6 additions & 6 deletions arch/arm64/kernel/pi/kaslr_early.c
@@ -16,17 +16,17 @@
#include <asm/memory.h>
#include <asm/pgtable.h>

#include "pi.h"

extern u16 memstart_offset_seed;

static u64 __init get_kaslr_seed(void *fdt)
static u64 __init get_kaslr_seed(void *fdt, int node)
{
static char const chosen_str[] __initconst = "chosen";
static char const seed_str[] __initconst = "kaslr-seed";
int node, len;
fdt64_t *prop;
u64 ret;
int len;

node = fdt_path_offset(fdt, chosen_str);
if (node < 0)
return 0;

@@ -39,14 +39,14 @@ static u64 __init get_kaslr_seed(void *fdt)
return ret;
}

asmlinkage u64 __init kaslr_early_init(void *fdt)
u64 __init kaslr_early_init(void *fdt, int chosen)
{
u64 seed, range;

if (kaslr_disabled_cmdline())
return 0;

seed = get_kaslr_seed(fdt);
seed = get_kaslr_seed(fdt, chosen);
if (!seed) {
if (!__early_cpu_has_rndr() ||
!__arm64_rndr((unsigned long *)&seed))
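kaslr_early_init() now also receives the pre-resolved /chosen offset and returns the randomization value derived from the seed. One common way to scale a 64-bit seed down to an offset below a given range is a 128-bit multiply-and-shift; the helper below is an illustrative sketch, not the kernel's code.

static inline u64 sketch_offset_in_range(u64 seed, u64 range)
{
	/* Maps the seed approximately uniformly onto [0, range). */
	return (u64)(((__uint128_t)range * seed) >> 64);
}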
Diffs for the remaining 8 changed files are not shown.
