KVM: arm64: Remove __hyp_text macro, use build rules instead
With nVHE code now fully separated from the rest of the kernel, the effects of
the __hyp_text macro (which had to be applied to all nVHE code) can be
achieved with build rules instead. The macro used to:
  (a) move code into the .hyp.text ELF section, now done by renaming .text
      with `objcopy`, and
  (b) apply `notrace` and `__noscs`, negating the effects of CC_FLAGS_FTRACE
      and CC_FLAGS_SCS respectively; those flags are now erased from
      KBUILD_CFLAGS instead (the same way as in the EFI stub). A sketch of
      both steps follows.
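
A minimal sketch of the equivalent manual steps, in build order (the compiler,
objcopy and file names here are illustrative, not the real kbuild invocation):

  # (b) compile with CC_FLAGS_FTRACE (e.g. -pg) and CC_FLAGS_SCS
  # (-fsanitize=shadow-call-stack) already filtered out of the usual flags,
  # so no per-function 'notrace'/'__noscs' is needed:
  aarch64-linux-gnu-gcc -O2 -c switch.c -o switch.hyp.tmp.o

  # (a) then move all generated code out of .text:
  aarch64-linux-gnu-objcopy --rename-section=.text=.hyp.text \
          switch.hyp.tmp.o switch.hyp.o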

Note that with __hyp_text removed from code shared with VHE, all VHE code is
now compiled into .text, without `notrace` and `__noscs`.

Uses of '.pushsection .hyp.text' are removed from the assembly files, as this
is now also covered by the build rules.

For MAINTAINERS: if this needs to be re-run, uses of the macro were removed
with the following command; formatting was fixed up manually.

  find arch/arm64/kvm/hyp -type f \( -name '*.c' -o -name '*.h' \) \
       -exec sed -i 's/ __hyp_text//g' {} +

Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200625131420.71444-15-dbrazdil@google.com
David Brazdil authored and Marc Zyngier committed Jul 5, 2020
1 parent c04dd45 commit c50cb04
Showing 17 changed files with 132 additions and 147 deletions.
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/kvm_emulate.h
@@ -516,7 +516,7 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
* Skip an instruction which has been emulated at hyp while most guest sysregs
* are live.
*/
-static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/kvm_hyp.h
@@ -12,8 +12,6 @@
#include <asm/alternative.h>
#include <asm/sysreg.h>

-#define __hyp_text __section(.hyp.text) notrace __noscs
-
#define read_sysreg_elx(r,nvh,vh) \
({ \
u64 reg; \
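
For context: the removed definition bundled three effects, each now reproduced
by the nVHE build rules. The `notrace`/`__noscs` expansions below are
paraphrased from the kernel headers of this era, so treat them as approximate:

  /* __section(.hyp.text): placement, now done by objcopy renaming
   * .text to .hyp.text after compilation. */

  /* notrace: suppress ftrace instrumentation, now done by erasing
   * CC_FLAGS_FTRACE from KBUILD_CFLAGS (approximate definition): */
  #define notrace __attribute__((__no_instrument_function__))

  /* __noscs: opt out of clang's Shadow Call Stack instrumentation, now done
   * by erasing CC_FLAGS_SCS (approximate definition): */
  #define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))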
6 changes: 3 additions & 3 deletions arch/arm64/kvm/hyp/aarch32.c
@@ -44,7 +44,7 @@ static const unsigned short cc_map[16] = {
/*
* Check if a trapped instruction should have been executed or not.
*/
-bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{
unsigned long cpsr;
u32 cpsr_cond;
@@ -93,7 +93,7 @@ bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
*
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
*/
-static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
unsigned long itbits, cond;
unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
* kvm_skip_instr - skip a trapped instruction and proceed to the next
* @vcpu: The vcpu pointer
*/
-void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
u32 pc = *vcpu_pc(vcpu);
bool is_thumb;
1 change: 0 additions & 1 deletion arch/arm64/kvm/hyp/entry.S
@@ -21,7 +21,6 @@
#define CPU_SP_EL0_OFFSET (CPU_XREG_OFFSET(30) + 8)

.text
-.pushsection .hyp.text, "ax"

/*
* We treat x18 as callee-saved as the host may use it as a platform
1 change: 0 additions & 1 deletion arch/arm64/kvm/hyp/fpsimd.S
@@ -9,7 +9,6 @@
#include <asm/fpsimdmacros.h>

.text
-.pushsection .hyp.text, "ax"

SYM_FUNC_START(__fpsimd_save_state)
fpsimd_save x0, 1
1 change: 0 additions & 1 deletion arch/arm64/kvm/hyp/hyp-entry.S
@@ -16,7 +16,6 @@
#include <asm/mmu.h>

.text
-.pushsection .hyp.text, "ax"

.macro do_el2_call
/*
16 changes: 8 additions & 8 deletions arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -88,9 +88,9 @@
default: write_debug(ptr[0], reg, 0); \
}

-static inline void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
-						 struct kvm_guest_debug_arch *dbg,
-						 struct kvm_cpu_context *ctxt)
+static inline void __debug_save_state(struct kvm_vcpu *vcpu,
+				      struct kvm_guest_debug_arch *dbg,
+				      struct kvm_cpu_context *ctxt)
{
u64 aa64dfr0;
int brps, wrps;
@@ -107,9 +107,9 @@ static inline void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
}

-static inline void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
-						    struct kvm_guest_debug_arch *dbg,
-						    struct kvm_cpu_context *ctxt)
+static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
+					 struct kvm_guest_debug_arch *dbg,
+					 struct kvm_cpu_context *ctxt)
{
u64 aa64dfr0;
int brps, wrps;
@@ -127,7 +127,7 @@ static inline void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
}

-static inline void __hyp_text __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
+static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
@@ -146,7 +146,7 @@ static inline void __hyp_text __debug_switch_to_guest_common(struct kvm_vcpu *vc
__debug_restore_state(vcpu, guest_dbg, guest_ctxt);
}

-static inline void __hyp_text __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
+static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
36 changes: 18 additions & 18 deletions arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -30,7 +30,7 @@
extern const char __hyp_panic_string[];

/* Check whether the FP regs were dirtied while in the host-side run loop: */
-static inline bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
+static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
/*
* When the system doesn't support FP/SIMD, we cannot rely on
@@ -48,15 +48,15 @@ static inline bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
}

/* Save the 32-bit only FPSIMD system register state */
-static inline void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
+static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
if (!vcpu_el1_is_32bit(vcpu))
return;

vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

-static inline void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
+static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
/*
* We are about to set CPTR_EL2.TFP to trap all floating point
@@ -73,7 +73,7 @@ static inline void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
}
}

-static inline void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
+static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
@@ -89,13 +89,13 @@ static inline void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

-static inline void __hyp_text __deactivate_traps_common(void)
+static inline void __deactivate_traps_common(void)
{
write_sysreg(0, hstr_el2);
write_sysreg(0, pmuserenr_el0);
}

-static inline void __hyp_text ___activate_traps(struct kvm_vcpu *vcpu)
+static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
u64 hcr = vcpu->arch.hcr_el2;

@@ -108,7 +108,7 @@ static inline void __hyp_text ___activate_traps(struct kvm_vcpu *vcpu)
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}

-static inline void __hyp_text ___deactivate_traps(struct kvm_vcpu *vcpu)
+static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
/*
* If we pended a virtual abort, preserve it until it gets
@@ -122,12 +122,12 @@ static inline void __hyp_text ___deactivate_traps(struct kvm_vcpu *vcpu)
}
}

-static inline void __hyp_text __activate_vm(struct kvm *kvm)
+static inline void __activate_vm(struct kvm *kvm)
{
__load_guest_stage2(kvm);
}

-static inline bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
+static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
u64 par, tmp;

@@ -156,7 +156,7 @@ static inline bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
return true;
}

-static inline bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
+static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
u8 ec;
u64 esr;
@@ -196,7 +196,7 @@ static inline bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
-static inline bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
+static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
bool vhe, sve_guest, sve_host;
u8 hsr_ec;
@@ -283,7 +283,7 @@ static inline bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
return true;
}

-static inline bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
int rt = kvm_vcpu_sys_get_rt(vcpu);
@@ -338,7 +338,7 @@ static inline bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}

-static inline bool __hyp_text esr_is_ptrauth_trap(u32 esr)
+static inline bool esr_is_ptrauth_trap(u32 esr)
{
u32 ec = ESR_ELx_EC(esr);

@@ -371,7 +371,7 @@ static inline bool __hyp_text esr_is_ptrauth_trap(u32 esr)
regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
})

-static inline bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *ctxt;
u64 val;
@@ -401,7 +401,7 @@ static inline bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
* the guest, false when we should restore the host state and return to the
* main run loop.
*/
-static inline bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
@@ -473,15 +473,15 @@ static inline bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_
return false;
}

-static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
+static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
if (!cpus_have_final_cap(ARM64_SSBD))
return false;

return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

-static inline void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
+static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
/*
@@ -494,7 +494,7 @@ static inline void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu
#endif
}

-static inline void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
+static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
/*
20 changes: 10 additions & 10 deletions arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -15,18 +15,18 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

-static inline void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
}

-static inline void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
}

-static inline void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
@@ -51,7 +51,7 @@ static inline void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ct
ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
}

-static inline void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
@@ -60,18 +60,18 @@ static inline void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_cont
ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}

-static inline void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
}

-static inline void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
}

-static inline void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
@@ -130,7 +130,7 @@ static inline void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context
write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
}

-static inline void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
+static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
u64 pstate = ctxt->gp_regs.regs.pstate;
u64 mode = pstate & PSR_AA32_MODE_MASK;
@@ -156,7 +156,7 @@ static inline void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_c
write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}

-static inline void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
+static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
u64 *spsr, *sysreg;

@@ -178,7 +178,7 @@ static inline void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}

-static inline void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
+static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
u64 *spsr, *sysreg;

8 changes: 7 additions & 1 deletion arch/arm64/kvm/hyp/nvhe/Makefile
@@ -21,7 +21,13 @@ $(obj)/%.hyp.o: $(obj)/%.hyp.tmp.o FORCE
$(call if_changed,hypcopy)

quiet_cmd_hypcopy = HYPCOPY $@
-cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@
+cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ \
+	       --rename-section=.text=.hyp.text \
+	       $< $@
+
+# Remove ftrace and Shadow Call Stack CFLAGS.
+# This is equivalent to the 'notrace' and '__noscs' annotations.
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))

# KVM nVHE code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
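A hypothetical spot check of a resulting object (the object path is
illustrative only, not part of the patch):

  # .hyp.text should now appear in place of .text:
  aarch64-linux-gnu-objdump -h arch/arm64/kvm/hyp/nvhe/switch.hyp.o
  # and symbols should carry the __kvm_nvhe_ prefix from the same objcopy step:
  aarch64-linux-gnu-nm arch/arm64/kvm/hyp/nvhe/switch.hyp.o | grep __kvm_nvhe_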
