arm64: kvm: Introduce nvhe stack size constants
Refactor nvhe stack code to use NVHE_STACK_SIZE/SHIFT constants,
instead of directly using PAGE_SIZE/SHIFT. This makes the code a bit
easier to read, without introducing any functional changes.

Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Link: https://lore.kernel.org/r/20241112003336.1375584-1-kaleshsingh@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
Kalesh Singh authored and Marc Zyngier committed Jan 8, 2025
1 parent 6834403 commit 38f9e4b
Showing 8 changed files with 33 additions and 30 deletions.
5 changes: 4 additions & 1 deletion arch/arm64/include/asm/memory.h
@@ -145,13 +145,16 @@
 
 #define OVERFLOW_STACK_SIZE	SZ_4K
 
+#define NVHE_STACK_SHIFT	PAGE_SHIFT
+#define NVHE_STACK_SIZE		(UL(1) << NVHE_STACK_SHIFT)
+
 /*
  * With the minimum frame size of [x29, x30], exactly half the combined
  * sizes of the hyp and overflow stacks is the maximum size needed to
  * save the unwinded stacktrace; plus an additional entry to delimit the
  * end.
  */
-#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
+#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
 
 /*
  * Alignment of kernel segments (e.g. .text, .data).
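
To make the arithmetic concrete, here is a minimal user-space sketch (not part of the commit; it assumes 4K pages, i.e. PAGE_SHIFT == 12, and a 64-bit long) showing that the new constants currently evaluate to the same values the PAGE_SIZE-based code produced, consistent with "no functional changes":

	/* Illustrative only: mirrors the definitions above under a 4K-page assumption. */
	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define SZ_4K			0x1000UL
	#define OVERFLOW_STACK_SIZE	SZ_4K

	#define NVHE_STACK_SHIFT	PAGE_SHIFT
	#define NVHE_STACK_SIZE		(1UL << NVHE_STACK_SHIFT)
	#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))

	int main(void)
	{
		/* 4096 bytes, identical to the old PAGE_SIZE-based value */
		printf("NVHE_STACK_SIZE      = %lu\n", NVHE_STACK_SIZE);
		/* (4096 + 4096) / 2 + 8 = 4104 bytes on a 64-bit build */
		printf("NVHE_STACKTRACE_SIZE = %zu\n", NVHE_STACKTRACE_SIZE);
		return 0;
	}
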
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/stacktrace/nvhe.h
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
 
 DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
-DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 
 void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
 
18 changes: 9 additions & 9 deletions arch/arm64/kvm/arm.c
@@ -61,7 +61,7 @@ static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTR
 
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
-DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -2339,7 +2339,7 @@ static void __init teardown_hyp_mode(void)
 
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu) {
-		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+		free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
 		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
 
 		if (free_sve) {
@@ -2527,15 +2527,15 @@ static int __init init_hyp_mode(void)
 	 * Allocate stack pages for Hypervisor-mode
 	 */
 	for_each_possible_cpu(cpu) {
-		unsigned long stack_page;
+		unsigned long stack_base;
 
-		stack_page = __get_free_page(GFP_KERNEL);
-		if (!stack_page) {
+		stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
+		if (!stack_base) {
 			err = -ENOMEM;
 			goto out_err;
 		}
 
-		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+		per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
 	}
 
 	/*
@@ -2604,9 +2604,9 @@ static int __init init_hyp_mode(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+		char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
 
-		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
+		err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
 			goto out_err;
@@ -2618,7 +2618,7 @@ static int __init init_hyp_mode(void)
 		 * __hyp_pa() won't do the right thing there, since the stack
 		 * has been mapped in the flexible private VA space.
		 */
-		params->stack_pa = __pa(stack_page);
+		params->stack_pa = __pa(stack_base);
 	}
 
 	for_each_possible_cpu(cpu) {
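
A brief note on the allocation calls above (an explanatory sketch, not code from the commit): __get_free_pages() and free_pages() take a power-of-two page "order" rather than a byte count, and NVHE_STACK_SHIFT - PAGE_SHIFT is exactly that order. Since NVHE_STACK_SHIFT is currently defined as PAGE_SHIFT, the order is 0 and a single page is still allocated per CPU, matching the old __get_free_page()/free_page() behaviour:

	/* Illustrative only: the order arithmetic used by init/teardown above. */
	#define PAGE_SHIFT		12	/* assumption: 4K pages */
	#define PAGE_SIZE		(1UL << PAGE_SHIFT)
	#define NVHE_STACK_SHIFT	PAGE_SHIFT

	/* 2^order contiguous pages are allocated and freed as one unit. */
	#define NVHE_STACK_ORDER	(NVHE_STACK_SHIFT - PAGE_SHIFT)

	/* Bytes per hyp stack: PAGE_SIZE << 0 == 4096 today. */
	#define NVHE_STACK_BYTES	(PAGE_SIZE << NVHE_STACK_ORDER)
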
4 changes: 2 additions & 2 deletions arch/arm64/kvm/hyp/nvhe/host.S
@@ -188,12 +188,12 @@ SYM_FUNC_END(__host_hvc)
 
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.
-	 * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
+	 * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
	 * of SP should always be 1.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
-	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
+	tbz	x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
 
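
The test above relies on the stack layout established in mm.c and mmu.c below: each hyp stack occupies the upper half of a 2 * NVHE_STACK_SIZE aligned private-VA window, with the lower half left unbacked as a guard page, so bit NVHE_STACK_SHIFT of any valid stack address is 1. A rough C equivalent of the single tbz-based check (a sketch under that assumption, not hypervisor code):

	#include <stdbool.h>

	#define NVHE_STACK_SHIFT	12	/* assumption: 4K pages */
	#define NVHE_STACK_SIZE		(1UL << NVHE_STACK_SHIFT)

	/*
	 * Valid stack addresses have bit NVHE_STACK_SHIFT set; addresses that
	 * have wrapped into the guard page have it clear. The assembly does
	 * this with a single "tbz x0, #NVHE_STACK_SHIFT, ..." test.
	 */
	static bool hyp_sp_overflowed(unsigned long sp)
	{
		return !(sp & NVHE_STACK_SIZE);
	}
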
12 changes: 6 additions & 6 deletions arch/arm64/kvm/hyp/nvhe/mm.c
@@ -360,10 +360,10 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
 
 	prev_base = __io_map_base;
 	/*
-	 * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
 	 * an alignment of our allocation on the order of the size.
 	 */
-	size = PAGE_SIZE * 2;
+	size = NVHE_STACK_SIZE * 2;
 	addr = ALIGN(__io_map_base, size);
 
 	ret = __pkvm_alloc_private_va_range(addr, size);
@@ -373,12 +373,12 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
 		 * at the higher address and leave the lower guard page
 		 * unbacked.
 		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
+		 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
 		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+		 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
 		 */
-		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
-					  PAGE_SIZE, phys, PAGE_HYP);
+		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE,
+					  NVHE_STACK_SIZE, phys, PAGE_HYP);
 		if (ret)
 			__io_map_base = prev_base;
 	}
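
To visualise the window being carved out above (a hypothetical illustration, not code from the commit): with NVHE_STACK_SIZE of 4K, each allocation reserves 8K of private VA, the lower 4K stays unbacked as the guard page, and only the upper 4K is mapped onto the stack's physical pages. mmu.c below sets up the same layout for its stacks:

	/* Hypothetical sketch of the 2 * NVHE_STACK_SIZE private-VA window. */
	#define NVHE_STACK_SIZE	0x1000UL	/* assumption: 4K */

	struct hyp_stack_window {
		unsigned long guard_lo, guard_hi;	/* [addr, addr + 4K): unbacked guard page */
		unsigned long stack_lo, stack_hi;	/* [addr + 4K, addr + 8K): mapped stack   */
	};

	static struct hyp_stack_window split_stack_window(unsigned long addr)
	{
		return (struct hyp_stack_window) {
			.guard_lo = addr,
			.guard_hi = addr + NVHE_STACK_SIZE,
			.stack_lo = addr + NVHE_STACK_SIZE,
			.stack_hi = addr + 2 * NVHE_STACK_SIZE,
		};
	}
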
4 changes: 2 additions & 2 deletions arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
 	struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 
-	stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
+	stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
 	stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
 	stacktrace_info->fp = fp;
 	stacktrace_info->pc = pc;
@@ -54,7 +54,7 @@ static struct stack_info stackinfo_get_hyp(void)
 {
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 	unsigned long high = params->stack_hyp_va;
-	unsigned long low = high - PAGE_SIZE;
+	unsigned long low = high - NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
12 changes: 6 additions & 6 deletions arch/arm64/kvm/mmu.c
@@ -706,10 +706,10 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 	/*
-	 * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
 	 * an alignment of our allocation on the order of the size.
 	 */
-	size = PAGE_SIZE * 2;
+	size = NVHE_STACK_SIZE * 2;
 	base = ALIGN_DOWN(io_map_base - size, size);
 
 	ret = __hyp_alloc_private_va_range(base);
@@ -726,12 +726,12 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
 	 * at the higher address and leave the lower guard page
 	 * unbacked.
 	 *
-	 * Any valid stack address now has the PAGE_SHIFT bit as 1
+	 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
 	 * and addresses corresponding to the guard page have the
-	 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+	 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
 	 */
-	ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
-				    PAGE_HYP);
+	ret = __create_hyp_mappings(base + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
+				    phys_addr, PAGE_HYP);
 	if (ret)
 		kvm_err("Cannot map hyp stack\n");
 
6 changes: 3 additions & 3 deletions arch/arm64/kvm/stacktrace.c
@@ -51,7 +51,7 @@ static struct stack_info stackinfo_get_hyp(void)
 	struct kvm_nvhe_stacktrace_info *stacktrace_info
 		= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
 	unsigned long low = (unsigned long)stacktrace_info->stack_base;
-	unsigned long high = low + PAGE_SIZE;
+	unsigned long high = low + NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
@@ -61,8 +61,8 @@ static struct stack_info stackinfo_get_hyp(void)
 
 static struct stack_info stackinfo_get_hyp_kern_va(void)
 {
-	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
-	unsigned long high = low + PAGE_SIZE;
+	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
+	unsigned long high = low + NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
