Skip to content

Commit

Permalink
KVM: arm64: Don't hijack guest context MDSCR_EL1
Browse files Browse the repository at this point in the history
Stealing MDSCR_EL1 in the guest's kvm_cpu_context for external debugging
is rather gross. Just add a field for this instead and let the context
switch code pick the correct one based on the debug owner.

Tested-by: James Clark <james.clark@linaro.org>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20241219224116.3941496-15-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
  • Loading branch information
Oliver Upton authored and Marc Zyngier committed Dec 20, 2024
1 parent 75a5fba commit 4ad3a0b
Show file tree
Hide file tree
Showing 3 changed files with 64 additions and 52 deletions.
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -751,6 +751,7 @@ struct kvm_vcpu_arch {
*/
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
u64 external_mdscr_el1;

enum {
VCPU_DEBUG_FREE,
Expand All @@ -771,7 +772,6 @@ struct kvm_vcpu_arch {
* are using guest debug.
*/
struct {
u32 mdscr_el1;
bool pstate_ss;
} guest_debug_preserved;

Expand Down
75 changes: 36 additions & 39 deletions arch/arm64/kvm/debug.c
Original file line number Diff line number Diff line change
Expand Up @@ -31,19 +31,12 @@
*/
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
vcpu->arch.guest_debug_preserved.pstate_ss =
(*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

if (vcpu->arch.guest_debug_preserved.pstate_ss)
*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
else
Expand Down Expand Up @@ -115,8 +108,6 @@ static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)

void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
unsigned long mdscr;

/* Check if we need to use the debug registers. */
if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
/* Save guest debug state */
Expand Down Expand Up @@ -154,36 +145,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
else
*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;

mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
mdscr |= DBG_MDSCR_SS;
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
} else {
mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
mdscr &= ~DBG_MDSCR_SS;
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
}

/*
* Enable breakpoints and watchpoints if userspace wants them.
*/
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
mdscr |= DBG_MDSCR_MDE;
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

/*
* The OS Lock blocks debug exceptions in all ELs when it is
* enabled. If the guest has enabled the OS Lock, constrain its
* effects to the guest. Emulate the behavior by clearing
* MDSCR_EL1.MDE. In so doing, we ensure that host debug
* exceptions are unaffected by guest configuration of the OS
* Lock.
*/
} else if (kvm_vcpu_os_lock_enabled(vcpu)) {
mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
mdscr &= ~DBG_MDSCR_MDE;
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
}
}
}
Expand Down Expand Up @@ -227,6 +188,41 @@ void kvm_init_host_debug_data(void)
host_data_set_flag(HAS_TRBE);
}

/*
* Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host
* has taken over MDSCR_EL1.
*
* - Userspace is single-stepping the guest, and MDSCR_EL1.SS is forced to 1.
*
* - Userspace is using the breakpoint/watchpoint registers to debug the
* guest, and MDSCR_EL1.MDE is forced to 1.
*
* - The guest has enabled the OS Lock, and KVM is forcing MDSCR_EL1.MDE to 0,
* masking all debug exceptions affected by the OS Lock.
*/
static void setup_external_mdscr(struct kvm_vcpu *vcpu)
{
	/*
	 * Bits KVM may take over on behalf of the host. Starting from the
	 * guest's MDSCR_EL1 preserves the unrelated controls it programmed,
	 * while clearing MDE up front also satisfies emulation of the OS
	 * Lock (debug exceptions masked) when neither debug flag is set.
	 */
	const u64 kvm_owned = MDSCR_EL1_SS | MDSCR_EL1_MDE | MDSCR_EL1_KDE;
	u64 mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1) & ~kvm_owned;

	/* Userspace single-step forces SS. */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		mdscr |= MDSCR_EL1_SS;

	/* Userspace HW breakpoints/watchpoints force MDE (and KDE). */
	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
		mdscr |= MDSCR_EL1_MDE | MDSCR_EL1_KDE;

	vcpu->arch.external_mdscr_el1 = mdscr;
}

void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
{
u64 mdscr;
Expand All @@ -249,6 +245,7 @@ void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu)
*/
if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
vcpu->arch.debug_owner = VCPU_DEBUG_HOST_OWNED;
setup_external_mdscr(vcpu);
} else {
mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

Expand Down
39 changes: 27 additions & 12 deletions arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,34 @@

static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);

static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
{
	/*
	 * Hyp stashes the running vCPU in the context; when that is not
	 * populated, the context is embedded in the vCPU itself and we can
	 * recover the owner from the containing structure.
	 */
	if (ctxt->__hyp_running_vcpu)
		return ctxt->__hyp_running_vcpu;

	return container_of(ctxt, struct kvm_vcpu, arch.ctxt);
}

static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)
{
	/* Anything other than this CPU's host context is a guest context. */
	return ctxt != host_data_ptr(host_ctxt);
}

static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);

	/*
	 * The guest's own MDSCR_EL1 applies unless the host has taken over
	 * the debug registers, in which case the 'external' value computed
	 * for host-directed debug is context-switched instead.
	 */
	if (!ctxt_is_guest(ctxt) || !kvm_host_owns_debug_regs(vcpu))
		return &ctxt_sys_reg(ctxt, MDSCR_EL1);

	return &vcpu->arch.external_mdscr_el1;
}

static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);
*ctxt_mdscr_el1(ctxt) = read_sysreg(mdscr_el1);

// POR_EL0 can affect uaccess, so must be saved/restored early.
if (ctxt_has_s1poe(ctxt))
Expand All @@ -33,16 +58,6 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}

static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;

if (!vcpu)
vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);

return vcpu;
}

static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
Expand Down Expand Up @@ -139,7 +154,7 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)

static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);
write_sysreg(*ctxt_mdscr_el1(ctxt), mdscr_el1);

// POR_EL0 can affect uaccess, so must be saved/restored early.
if (ctxt_has_s1poe(ctxt))
Expand Down

0 comments on commit 4ad3a0b

Please sign in to comment.