KVM: PPC: Book3S HV Nested: Fix TM softpatch HFAC interrupt emulation
Have the TM softpatch emulation code set up the HFAC interrupt and
return -1 in case an instruction was executed with HFSCR bits clear,
and have the interrupt exit handler fall through to the HFAC handler.
When the L0 is running a nested guest, this ensures the HFAC interrupt
is correctly passed up to the L1.

The "direct guest" exit handler will turn these into PROGILL program
interrupts so functionality in practice will be unchanged. But it's
possible an L1 would want to handle these in a different way.

Also rearrange the FAC interrupt emulation code to match the HFAC format
while here (mainly, adding the FSCR_INTR_CAUSE mask).

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210811160134.904987-5-npiggin@gmail.com
Nicholas Piggin authored and Michael Ellerman committed Aug 25, 2021
Commit d82b392 (parent 4782e0c)
Showing 3 changed files with 48 additions and 34 deletions.
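
As a reading aid before the diff, here is a minimal, compilable userspace mock of the flow the commit message describes. Every name in it is a simplified stand-in (the real types, trap numbers and handlers are in the files below): the emulator either handles the instruction, or records the blocking facility in the HFSCR cause byte and returns -1, and the exit handler's softpatch case then falls through to the facility-unavailable case.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins; the values are placeholders, not the kernel's. */
enum { TRAP_HV_SOFTPATCH = 1, TRAP_H_FAC_UNAVAIL = 2 };
enum { RESUME_GUEST = 0, RESUME_HOST = 1 };
#define FSCR_TM_LG	 5			/* placeholder facility bit number */
#define HFSCR_TM	 (1ULL << FSCR_TM_LG)	/* TM facility enable bit */
#define HFSCR_INTR_CAUSE (0xFFULL << 56)	/* cause field, bits 63:56 */

struct vcpu { uint64_t hfscr; int trap; };

/* Mock of the emulator: if the hypervisor has TM disabled, record the
 * cause, rewrite the trap and ask the caller to rerun its handler. */
static int tm_emulation(struct vcpu *v)
{
	if (!(v->hfscr & HFSCR_TM)) {
		v->hfscr &= ~HFSCR_INTR_CAUSE;
		v->hfscr |= (uint64_t)FSCR_TM_LG << 56;	/* which facility */
		v->trap = TRAP_H_FAC_UNAVAIL;
		return -1;		/* rerun host interrupt handler */
	}
	return RESUME_GUEST;		/* instruction was emulated */
}

/* Mock of the nested exit handler: softpatch falls through to HFAC. */
static int handle_nested_exit(struct vcpu *v)
{
	int r;

	switch (v->trap) {
	case TRAP_HV_SOFTPATCH:
		r = tm_emulation(v);
		if (r != -1)
			break;
		/* fall through to the facility unavailable handler */
	case TRAP_H_FAC_UNAVAIL:
		r = RESUME_HOST;	/* pass the HFAC up to the L1 */
		break;
	default:
		r = RESUME_GUEST;
	}
	return r;
}

int main(void)
{
	struct vcpu v = { .hfscr = 0, .trap = TRAP_HV_SOFTPATCH };

	printf("r=%d, cause byte=%u\n", handle_nested_exit(&v),
	       (unsigned)(v.hfscr >> 56));	/* prints r=1, cause byte=5 */
	return 0;
}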
arch/powerpc/include/asm/reg.h (2 additions, 1 deletion)
@@ -415,6 +415,7 @@
 #define FSCR_TAR __MASK(FSCR_TAR_LG)
 #define FSCR_EBB __MASK(FSCR_EBB_LG)
 #define FSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
 #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
 #define HFSCR_PREFIX __MASK(FSCR_PREFIX_LG)
 #define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
@@ -426,7 +427,7 @@
 #define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
 #define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
 #define HFSCR_FP __MASK(FSCR_FP_LG)
-#define HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
+#define HFSCR_INTR_CAUSE FSCR_INTR_CAUSE
 #define SPRN_TAR 0x32f /* Target Address Register */
 #define SPRN_LPCR 0x13E /* LPAR Control Register */
 #define LPCR_VPM0 ASM_CONST(0x8000000000000000)
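A note on the encoding above, shown as a small standalone C example (the LG value is copied from reg.h; treat it as illustrative): each facility has one "LG" bit number, used two ways. The enable bit in the FSCR/HFSCR is __MASK(LG), that is 1 << LG, while the interrupt cause field in bits 63:56 (the new FSCR_INTR_CAUSE mask, now aliased by HFSCR_INTR_CAUSE) stores the LG value itself.

#include <stdio.h>
#include <stdint.h>

#define __MASK(x)	(1ULL << (x))
#define FSCR_EBB_LG	7			/* EBB bit number, as in reg.h */
#define FSCR_EBB	__MASK(FSCR_EBB_LG)	/* enable bit: 0x80 */
#define FSCR_INTR_CAUSE	(0xFFULL << 56)		/* cause field: bits 63:56 */

int main(void)
{
	uint64_t fscr = FSCR_EBB;	/* facility enabled, no cause recorded */

	/* On a facility unavailable interrupt, the cause byte names the
	 * facility: clear the old cause, then store the LG number. */
	fscr &= ~FSCR_INTR_CAUSE;
	fscr |= (uint64_t)FSCR_EBB_LG << 56;

	printf("enable bit=%#llx cause byte=%llu\n",
	       (unsigned long long)FSCR_EBB,
	       (unsigned long long)(fscr >> 56));	/* 0x80 and 7 */
	return 0;
}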
arch/powerpc/kvm/book3s_hv.c (22 additions, 13 deletions)
@@ -1679,6 +1679,21 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = RESUME_GUEST;
 		}
 		break;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
+		/*
+		 * This occurs for various TM-related instructions that
+		 * we need to emulate on POWER9 DD2.2. We have already
+		 * handled the cases where the guest was in real-suspend
+		 * mode and was transitioning to transactional state.
+		 */
+		r = kvmhv_p9_tm_emulation(vcpu);
+		if (r != -1)
+			break;
+		fallthrough; /* go to facility unavailable handler */
+#endif
+
 	/*
 	 * This occurs if the guest (kernel or userspace), does something that
 	 * is prohibited by HFSCR.
@@ -1697,18 +1712,6 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		}
 		break;
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
-		/*
-		 * This occurs for various TM-related instructions that
-		 * we need to emulate on POWER9 DD2.2. We have already
-		 * handled the cases where the guest was in real-suspend
-		 * mode and was transitioning to transactional state.
-		 */
-		r = kvmhv_p9_tm_emulation(vcpu);
-		break;
-#endif
-
 	case BOOK3S_INTERRUPT_HV_RM_HARD:
 		r = RESUME_PASSTHROUGH;
 		break;
@@ -1811,9 +1814,15 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 		 * mode and was transitioning to transactional state.
 		 */
 		r = kvmhv_p9_tm_emulation(vcpu);
-		break;
+		if (r != -1)
+			break;
+		fallthrough; /* go to facility unavailable handler */
 #endif
 
+	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+		r = RESUME_HOST;
+		break;
+
 	case BOOK3S_INTERRUPT_HV_RM_HARD:
 		vcpu->arch.trap = 0;
 		r = RESUME_GUEST;
arch/powerpc/kvm/book3s_hv_tm.c (24 additions, 20 deletions)
@@ -88,14 +88,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		}
 		/* check EBB facility is available */
 		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
-			/* generate an illegal instruction interrupt */
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-			return RESUME_GUEST;
+			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+			vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56;
+			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+			return -1; /* rerun host interrupt handler */
 		}
 		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
 			/* generate a facility unavailable interrupt */
-			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-				((u64)FSCR_EBB_LG << 56);
+			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+			vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56;
 			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
 			return RESUME_GUEST;
 		}
@@ -138,14 +139,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		}
 		/* check for TM disabled in the HFSCR or MSR */
 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
-			/* generate an illegal instruction interrupt */
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-			return RESUME_GUEST;
+			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+			return -1; /* rerun host interrupt handler */
 		}
 		if (!(msr & MSR_TM)) {
 			/* generate a facility unavailable interrupt */
-			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-				((u64)FSCR_TM_LG << 56);
+			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
 			kvmppc_book3s_queue_irqprio(vcpu,
 					BOOK3S_INTERRUPT_FAC_UNAVAIL);
 			return RESUME_GUEST;
@@ -169,14 +171,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
 		/* check for TM disabled in the HFSCR or MSR */
 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
-			/* generate an illegal instruction interrupt */
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-			return RESUME_GUEST;
+			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+			return -1; /* rerun host interrupt handler */
 		}
 		if (!(msr & MSR_TM)) {
 			/* generate a facility unavailable interrupt */
-			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-				((u64)FSCR_TM_LG << 56);
+			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
 			kvmppc_book3s_queue_irqprio(vcpu,
 					BOOK3S_INTERRUPT_FAC_UNAVAIL);
 			return RESUME_GUEST;
@@ -208,14 +211,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		/* XXX do we need to check for PR=0 here? */
 		/* check for TM disabled in the HFSCR or MSR */
 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
-			/* generate an illegal instruction interrupt */
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
-			return RESUME_GUEST;
+			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+			return -1; /* rerun host interrupt handler */
 		}
 		if (!(msr & MSR_TM)) {
 			/* generate a facility unavailable interrupt */
-			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
-				((u64)FSCR_TM_LG << 56);
+			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
 			kvmppc_book3s_queue_irqprio(vcpu,
 					BOOK3S_INTERRUPT_FAC_UNAVAIL);
 			return RESUME_GUEST;
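
The same two branches repeat above for EBB and, three times, for TM. Purely as a reading aid (this helper is not part of the patch, is written only against identifiers visible in the hunks above, and would compile only inside the KVM HV sources), the pattern the patch establishes could be summarized as:

/* Hypothetical summary helper, not in the patch. fac_lg is the facility
 * bit number (FSCR_EBB_LG or FSCR_TM_LG in the hunks above). */
static int facility_unavailable(struct kvm_vcpu *vcpu, u64 fac_lg, bool hv)
{
	if (hv) {
		/* Disabled by the hypervisor (HFSCR): record the cause and
		 * return -1 so the exit handler falls through to the
		 * BOOK3S_INTERRUPT_H_FAC_UNAVAIL case. */
		vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
		vcpu->arch.hfscr |= fac_lg << 56;
		vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
		return -1;	/* rerun host interrupt handler */
	}

	/* Disabled by the guest (MSR/FSCR): queue a guest-level facility
	 * unavailable interrupt and resume the guest directly. */
	vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
	vcpu->arch.fscr |= fac_lg << 56;
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
	return RESUME_GUEST;
}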
