Merge tag 'kvmarm-fixes-6.16-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.16, take #3

- Fix another set of FP/SIMD/SVE bugs affecting NV, and plug some
  missing synchronisation

- A small follow-up to the earlier irqbypass hook fixes, tightening the
  check and ensuring that we only deal with MSI for both the old and the
  new route entries

- Rework the way the shadow LRs are addressed in a nesting
  configuration, plugging an embarrassing bug as well as simplifying
  the whole process

- Add yet another fix for the dreaded arch_timer_edge_cases selftest
Paolo Bonzini committed Jun 20, 2025
2 parents e04c78d + 04c5355 commit a09e359
Showing 9 changed files with 215 additions and 271 deletions.
62 changes: 0 additions & 62 deletions arch/arm64/include/asm/kvm_emulate.h
@@ -561,68 +561,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu_set_flag((v), e); \
} while (0)

#define __build_check_all_or_none(r, bits) \
BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))

#define __cpacr_to_cptr_clr(clr, set) \
({ \
u64 cptr = 0; \
\
if ((set) & CPACR_EL1_FPEN) \
cptr |= CPTR_EL2_TFP; \
if ((set) & CPACR_EL1_ZEN) \
cptr |= CPTR_EL2_TZ; \
if ((set) & CPACR_EL1_SMEN) \
cptr |= CPTR_EL2_TSM; \
if ((clr) & CPACR_EL1_TTA) \
cptr |= CPTR_EL2_TTA; \
if ((clr) & CPTR_EL2_TAM) \
cptr |= CPTR_EL2_TAM; \
if ((clr) & CPTR_EL2_TCPAC) \
cptr |= CPTR_EL2_TCPAC; \
\
cptr; \
})

#define __cpacr_to_cptr_set(clr, set) \
({ \
u64 cptr = 0; \
\
if ((clr) & CPACR_EL1_FPEN) \
cptr |= CPTR_EL2_TFP; \
if ((clr) & CPACR_EL1_ZEN) \
cptr |= CPTR_EL2_TZ; \
if ((clr) & CPACR_EL1_SMEN) \
cptr |= CPTR_EL2_TSM; \
if ((set) & CPACR_EL1_TTA) \
cptr |= CPTR_EL2_TTA; \
if ((set) & CPTR_EL2_TAM) \
cptr |= CPTR_EL2_TAM; \
if ((set) & CPTR_EL2_TCPAC) \
cptr |= CPTR_EL2_TCPAC; \
\
cptr; \
})

#define cpacr_clear_set(clr, set) \
do { \
BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \
BUILD_BUG_ON((clr) & CPACR_EL1_E0POE); \
__build_check_all_or_none((clr), CPACR_EL1_FPEN); \
__build_check_all_or_none((set), CPACR_EL1_FPEN); \
__build_check_all_or_none((clr), CPACR_EL1_ZEN); \
__build_check_all_or_none((set), CPACR_EL1_ZEN); \
__build_check_all_or_none((clr), CPACR_EL1_SMEN); \
__build_check_all_or_none((set), CPACR_EL1_SMEN); \
\
if (has_vhe() || has_hvhe()) \
sysreg_clear_set(cpacr_el1, clr, set); \
else \
sysreg_clear_set(cptr_el2, \
__cpacr_to_cptr_clr(clr, set), \
__cpacr_to_cptr_set(clr, set));\
} while (0)

/*
* Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
* format if E2H isn't set.
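An aside on the two translation helpers removed above, since their polarity is easy to misread: the xEN fields in CPACR_EL1 format are enable bits, while TFP/TZ/TSM in the nVHE CPTR_EL2 format are trap bits, so an enable being set maps to a trap being cleared and vice versa. That is why __cpacr_to_cptr_clr() keys off (set) and __cpacr_to_cptr_set() keys off (clr). Below is a standalone userspace sketch of that translation; the bit positions are illustrative stand-ins, the real definitions live in the arm64 headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative encodings: FPEN at CPACR_EL1[21:20], ZEN at [17:16];
 * TFP at CPTR_EL2[10] and TZ at [8] in the nVHE format. */
#define CPACR_EL1_FPEN	(3ULL << 20)
#define CPACR_EL1_ZEN	(3ULL << 16)
#define CPTR_EL2_TFP	(1ULL << 10)
#define CPTR_EL2_TZ	(1ULL << 8)

/* Enable bits being set become trap bits to clear... */
static uint64_t cpacr_to_cptr_clr(uint64_t clr, uint64_t set)
{
	uint64_t cptr = 0;

	if (set & CPACR_EL1_FPEN)
		cptr |= CPTR_EL2_TFP;
	if (set & CPACR_EL1_ZEN)
		cptr |= CPTR_EL2_TZ;

	return cptr;
}

/* ...and enable bits being cleared become trap bits to set. */
static uint64_t cpacr_to_cptr_set(uint64_t clr, uint64_t set)
{
	uint64_t cptr = 0;

	if (clr & CPACR_EL1_FPEN)
		cptr |= CPTR_EL2_TFP;
	if (clr & CPACR_EL1_ZEN)
		cptr |= CPTR_EL2_TZ;

	return cptr;
}

int main(void)
{
	/* cpacr_clear_set(0, CPACR_EL1_FPEN), i.e. "enable FP": */
	printf("TFP in clr: %d, TFP in set: %d\n",
	       !!(cpacr_to_cptr_clr(0, CPACR_EL1_FPEN) & CPTR_EL2_TFP),
	       !!(cpacr_to_cptr_set(0, CPACR_EL1_FPEN) & CPTR_EL2_TFP));
	return 0;
}

This prints "TFP in clr: 1, TFP in set: 0": the request to enable FP turns into clearing the CPTR_EL2.TFP trap bit, the same polarity now handled directly in __activate_cptr_traps_nvhe() below.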
6 changes: 2 additions & 4 deletions arch/arm64/include/asm/kvm_host.h
@@ -1289,9 +1289,8 @@ void kvm_arm_resume_guest(struct kvm *kvm);
})

/*
- * The couple of isb() below are there to guarantee the same behaviour
- * on VHE as on !VHE, where the eret to EL1 acts as a context
- * synchronization event.
+ * The isb() below is there to guarantee the same behaviour on VHE as on !VHE,
+ * where the eret to EL1 acts as a context synchronization event.
 */
#define kvm_call_hyp(f, ...) \
do { \
@@ -1309,7 +1308,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
\
if (has_vhe()) { \
ret = f(__VA_ARGS__); \
-	isb(); \
} else { \
ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \
} \
3 changes: 2 additions & 1 deletion arch/arm64/kvm/arm.c
@@ -2764,7 +2764,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
struct kvm_kernel_irq_routing_entry *new)
{
-	if (new->type != KVM_IRQ_ROUTING_MSI)
+	if (old->type != KVM_IRQ_ROUTING_MSI ||
+	    new->type != KVM_IRQ_ROUTING_MSI)
return true;

return memcmp(&old->msi, &new->msi, sizeof(new->msi));
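Spelled out, the tightened check behaves as follows (an informal summary, not part of the commit):

old route    new route    kvm_arch_irqfd_route_changed()
MSI          MSI          changed iff the MSI payloads differ (memcmp)
MSI          non-MSI      true
non-MSI      MSI          true; previously this fell through to a memcmp()
                          against a stale old->msi, which is the hole being
                          plugged
non-MSI      non-MSI      true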
147 changes: 138 additions & 9 deletions arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -65,6 +65,136 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
}
}

static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;

/*
* Always trap SME since it's not supported in KVM.
* TSM is RES1 if SME isn't implemented.
*/
val |= CPTR_EL2_TSM;

if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
val |= CPTR_EL2_TZ;

if (!guest_owns_fp_regs())
val |= CPTR_EL2_TFP;

write_sysreg(val, cptr_el2);
}

static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
/*
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
* CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
* except for some missing controls, such as TAM.
* In this case, CPTR_EL2.TAM has the same position with or without
* VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
* shift value for trapping the AMU accesses.
*/
u64 val = CPTR_EL2_TAM | CPACR_EL1_TTA;
u64 cptr;

if (guest_owns_fp_regs()) {
val |= CPACR_EL1_FPEN;
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN;
}

if (!vcpu_has_nv(vcpu))
goto write;

/*
* The architecture is a bit crap (what a surprise): an EL2 guest
* writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
* as they are RES0 in the guest's view. To work around it, trap the
* sucker using the very same bit it can't set...
*/
if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
val |= CPTR_EL2_TCPAC;

/*
* Layer the guest hypervisor's trap configuration on top of our own if
* we're in a nested context.
*/
if (is_hyp_ctxt(vcpu))
goto write;

cptr = vcpu_sanitised_cptr_el2(vcpu);

/*
* Pay attention, there's some interesting detail here.
*
* The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
* meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
*
* - CPTR_EL2.xEN = x0, traps are enabled
* - CPTR_EL2.xEN = x1, traps are disabled
*
* In other words, bit[0] determines if guest accesses trap or not. In
* the interest of simplicity, clear the entire field if the guest
* hypervisor has traps enabled to dispel any illusion of something more
* complicated taking place.
*/
if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
val &= ~CPACR_EL1_FPEN;
if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
val &= ~CPACR_EL1_ZEN;

if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
val |= cptr & CPACR_EL1_E0POE;

val |= cptr & CPTR_EL2_TCPAC;

write:
write_sysreg(val, cpacr_el1);
}

static inline void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
if (!guest_owns_fp_regs())
__activate_traps_fpsimd32(vcpu);

if (has_vhe() || has_hvhe())
__activate_cptr_traps_vhe(vcpu);
else
__activate_cptr_traps_nvhe(vcpu);
}

static inline void __deactivate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
u64 val = CPTR_NVHE_EL2_RES1;

if (!cpus_have_final_cap(ARM64_SVE))
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;

write_sysreg(val, cptr_el2);
}

static inline void __deactivate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
u64 val = CPACR_EL1_FPEN;

if (cpus_have_final_cap(ARM64_SVE))
val |= CPACR_EL1_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN;

write_sysreg(val, cpacr_el1);
}

static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
if (has_vhe() || has_hvhe())
__deactivate_cptr_traps_vhe(vcpu);
else
__deactivate_cptr_traps_nvhe(vcpu);
}

#define reg_to_fgt_masks(reg) \
({ \
struct fgt_masks *m; \
@@ -486,11 +616,6 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
*/
if (system_supports_sve()) {
__hyp_sve_save_host();

-	/* Re-enable SVE traps if not supported for the guest vcpu. */
-	if (!vcpu_has_sve(vcpu))
-		cpacr_clear_set(CPACR_EL1_ZEN, 0);

} else {
__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
}
@@ -541,10 +666,7 @@ static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
/* Valid trap. Switch the context: */

/* First disable enough traps to allow us to update the registers */
-	if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
-		cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN);
-	else
-		cpacr_clear_set(0, CPACR_EL1_FPEN);
+	__deactivate_cptr_traps(vcpu);
isb();

/* Write out the host state if it's in the registers */
@@ -566,6 +688,13 @@ static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)

*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;

/*
* Re-enable traps necessary for the current state of the guest, e.g.
* those enabled by a guest hypervisor. The ERET to the guest will
* provide the necessary context synchronization.
*/
__activate_cptr_traps(vcpu);

return true;
}

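One detail from the xEN comment above deserves a standalone illustration: with HCR_EL2.TGE = 0, only bit[0] of each two-bit enable field decides whether accesses trap. A userspace sketch, with SYS_FIELD_GET reduced to a hand-rolled shift and mask (the field offset is an illustrative stand-in):

#include <stdint.h>
#include <stdio.h>

/* Illustrative: CPACR_EL1.FPEN occupies bits [21:20]. */
#define FPEN_SHIFT	20
#define FPEN_MASK	(3ULL << FPEN_SHIFT)

/* Guest FP accesses trap iff bit[0] of the FPEN field is clear. */
static int fpen_traps(uint64_t cptr)
{
	uint64_t fpen = (cptr & FPEN_MASK) >> FPEN_SHIFT;

	return !(fpen & 1);
}

int main(void)
{
	for (uint64_t fpen = 0; fpen < 4; fpen++)
		printf("FPEN=0b%d%d -> %s\n",
		       (int)(fpen >> 1), (int)(fpen & 1),
		       fpen_traps(fpen << FPEN_SHIFT) ? "trap" : "no trap");
	return 0;
}

This prints "trap" for FPEN = 0b00 and 0b10 and "no trap" for 0b01 and 0b11, matching the "x0 traps, x1 doesn't" behaviour that lets __activate_cptr_traps_vhe() clear the whole field whenever the guest hypervisor leaves bit[0] clear.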
5 changes: 4 additions & 1 deletion arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -69,7 +69,10 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
if (!guest_owns_fp_regs())
return;

-	cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN);
+	/*
+	 * Traps have been disabled by __deactivate_cptr_traps(), but there
+	 * hasn't necessarily been a context synchronization event yet.
+	 */
isb();

if (vcpu_has_sve(vcpu))
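The isb() retained above is doing real work: a write to CPACR_EL1 (or CPTR_EL2) is not guaranteed to affect later FP/SVE instructions until a context synchronization event occurs. On the guest-entry path the ERET provides that event, which is what the new comment in kvm_hyp_handle_fpsimd() relies on; here there is no ERET between the trap-disable write and the register accesses, so the explicit barrier stays. Schematically (an illustration of the ordering requirement, not a verbatim excerpt):

	write_sysreg(val, cpacr_el1);	/* trap-disable write, as in __deactivate_cptr_traps() */
	isb();				/* context synchronization event */
	/* FP/SVE register accesses from here on are guaranteed not to trap */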
59 changes: 0 additions & 59 deletions arch/arm64/kvm/hyp/nvhe/switch.c
@@ -47,65 +47,6 @@ struct fgt_masks hdfgwtr2_masks;

extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);

static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */

if (!guest_owns_fp_regs())
__activate_traps_fpsimd32(vcpu);

if (has_hvhe()) {
val |= CPACR_EL1_TTA;

if (guest_owns_fp_regs()) {
val |= CPACR_EL1_FPEN;
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN;
}

write_sysreg(val, cpacr_el1);
} else {
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;

/*
* Always trap SME since it's not supported in KVM.
* TSM is RES1 if SME isn't implemented.
*/
val |= CPTR_EL2_TSM;

if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
val |= CPTR_EL2_TZ;

if (!guest_owns_fp_regs())
val |= CPTR_EL2_TFP;

write_sysreg(val, cptr_el2);
}
}

static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
if (has_hvhe()) {
u64 val = CPACR_EL1_FPEN;

if (cpus_have_final_cap(ARM64_SVE))
val |= CPACR_EL1_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN;

write_sysreg(val, cpacr_el1);
} else {
u64 val = CPTR_NVHE_EL2_RES1;

if (!cpus_have_final_cap(ARM64_SVE))
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;

write_sysreg(val, cptr_el2);
}
}

static void __activate_traps(struct kvm_vcpu *vcpu)
{
___activate_traps(vcpu, vcpu->arch.hcr_el2);
(The remaining 3 changed files are not shown.)
