KVM: arm64: Consume pending SError as early as possible
On systems with v8.2 we replace the 'vaxorcism' of guest SError with an
alternative sequence that uses the ESB-instruction, then reads DISR_EL1.
This saves the unmasking and remasking of asynchronous exceptions.

We do this after we've saved the guest registers and restored the
host's. Any SError that becomes pending due to this will be accounted
to the guest, even though it actually occurred during host execution.

Move the ESB-instruction as early as possible. Any guest SError
will become pending due to this ESB-instruction and then be consumed to
DISR_EL1 before the host touches anything.

This lets us account for host/guest SError precisely on the guest
exit exception boundary.

Because the ESB-instruction now lands in the preamble section of
the vectors, we need to add it to the unpatched indirect vectors
too, and to any sequence that may be patched in over the top.

The ESB-instruction always lives in the head of the vectors, so that it
comes before any memory write, whereas the register-store always lives
in the tail.
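
To make the ordering concrete, here is a rough sketch of the resulting
valid_vect preamble, pieced together from the hyp-entry.S hunks below
(the 661/662 labels and the check_preamble_length macro are the ones
already used in that file):

  .macro valid_vect target
  	.align 7
  661:
  	esb				// consume any pending guest SError into DISR_EL1
  	stp	x0, x1, [sp, #-16]!	// first memory write on exit, now after the esb
  662:
  	b	\target

  check_preamble_length 661b, 662b
  .endm

Everything between 661 and 662 is the preamble that
kvm_patch_vector_branch() generates a branch over, which is why
KVM_VECTOR_PREAMBLE below grows from one instruction to two.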

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
James Morse authored and Marc Zyngier committed Jul 5, 2019
1 parent 5d99437 commit 0e5b9c0
Showing 3 changed files with 8 additions and 5 deletions.
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/kvm_asm.h
@@ -34,7 +34,7 @@
* Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
* that jumps over this.
*/
-#define KVM_VECTOR_PREAMBLE (1 * AARCH64_INSN_SIZE)
+#define KVM_VECTOR_PREAMBLE (2 * AARCH64_INSN_SIZE)

#ifndef __ASSEMBLY__

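
As a quick sanity check of the new value (AARCH64_INSN_SIZE is the
fixed 4-byte width of an A64 instruction):

  KVM_VECTOR_PREAMBLE = 2 * AARCH64_INSN_SIZE
                      = 2 * 4 bytes
                      = 8 bytes	(esb + stp, the two preamble instructions)

so the branch that kvm_patch_vector_branch() generates now lands 8 bytes
into each vector instead of 4.
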
5 changes: 2 additions & 3 deletions arch/arm64/kvm/hyp/entry.S
@@ -127,8 +127,8 @@ ENTRY(__guest_exit)

alternative_if ARM64_HAS_RAS_EXTN
// If we have the RAS extensions we can consume a pending error
-	// without an unmask-SError and isb.
-	esb
+	// without an unmask-SError and isb. The ESB-instruction consumed any
+	// pending guest error when we took the exception from the guest.
mrs_s x2, SYS_DISR_EL1
str x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
cbz x2, 1f
@@ -146,7 +146,6 @@ alternative_else
mov x5, x0

dsb sy // Synchronize against in-flight ld/st
-	nop
msr daifclr, #4 // Unmask aborts
alternative_endif

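
One detail of the entry.S hunks that is easy to miss: alternative_if /
alternative_else sequences are patched over one another, so both sides
must assemble to the same size. Dropping the esb from the RAS side is
therefore balanced by dropping the spare nop from the fallback side:

  RAS path:       esb removed  ->  one instruction shorter
  fallback path:  nop removed  ->  one instruction shorter, sizes still match
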
6 changes: 5 additions & 1 deletion arch/arm64/kvm/hyp/hyp-entry.S
@@ -226,6 +226,7 @@ ENDPROC(\label)
.macro valid_vect target
.align 7
661:
+	esb
stp x0, x1, [sp, #-16]!
662:
b \target
@@ -237,6 +238,7 @@ check_preamble_length 661b, 662b
.align 7
661:
b \target
+	nop
662:
ldp x0, x1, [sp], #16
b \target
@@ -269,7 +271,8 @@ ENDPROC(__kvm_hyp_vector)
#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
.align 7
-1:	.rept 27
+1:	esb
+	.rept 26
nop
.endr
/*
@@ -317,6 +320,7 @@ ENTRY(__bp_harden_hyp_vecs_end)
.popsection

ENTRY(__smccc_workaround_1_smc_start)
+	esb
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
stp x0, x1, [sp, #(8 * 2)]
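
For the indirect vectors, the esb simply takes over the first slot of
the nop slide, so each hyp_ventry keeps its .align 7 (128-byte) slot,
and any sequence that may be patched in over the top (such as
__smccc_workaround_1_smc above) now starts with its own esb. A sketch
of the resulting head, using only what the hunks show:

  .macro hyp_ventry
  	.align 7
  1:	esb		// head of the vector, before any memory write
  	.rept 26
  	nop		// slide that a patched-in sequence may overwrite
  	.endr
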
