Merge tag 'kvmarm-fixes-5.7-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

KVM/arm fixes for Linux 5.7, take #2

- Fix compilation with Clang
- Correctly initialize GICv4.1 in the absence of a virtual ITS
- Move SP_EL0 save/restore to the guest entry/exit code
- Handle PC wrap around on 32bit guests, and narrow all 32bit
  registers on userspace access
Paolo Bonzini committed May 4, 2020
2 parents 9e5e19f + 0225fd5 commit 7134fa0
Showing 7 changed files with 49 additions and 19 deletions.
7 changes: 7 additions & 0 deletions arch/arm64/kvm/guest.c
@@ -200,6 +200,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	}
 
 	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+
+	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+		int i;
+
+		for (i = 0; i < 16; i++)
+			*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+	}
 out:
 	return err;
 }
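For illustration only, a standalone sketch of the narrowing this hunk performs (plain C with a made-up value, not kernel code): the AArch32 GPRs live in 64-bit register-file slots, and the (u32) round trip clears whatever a 64-bit userspace write left in the upper half.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 64-bit register-file slot holding an AArch32 GPR; the
	 * upper half is stale garbage after a 64-bit userspace write. */
	uint64_t reg = 0xdeadbeef00000001ULL;

	/* Same effect as the (u32) cast in set_core_reg() above. */
	reg = (uint32_t)reg;

	printf("%#llx\n", (unsigned long long)reg);	/* prints 0x1 */
	return 0;
}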
23 changes: 23 additions & 0 deletions arch/arm64/kvm/hyp/entry.S
@@ -18,6 +18,7 @@
 
 #define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)
 
 	.text
 	.pushsection	.hyp.text, "ax"
@@ -47,6 +48,16 @@
 	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
 .endm
 
+.macro save_sp_el0 ctxt, tmp
+	mrs	\tmp, sp_el0
+	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
+.endm
+
+.macro restore_sp_el0 ctxt, tmp
+	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
+	msr	sp_el0, \tmp
+.endm
+
 /*
  * u64 __guest_enter(struct kvm_vcpu *vcpu,
  *		     struct kvm_cpu_context *host_ctxt);
@@ -60,6 +71,9 @@ SYM_FUNC_START(__guest_enter)
 	// Store the host regs
 	save_callee_saved_regs x1
 
+	// Save the host's sp_el0
+	save_sp_el0	x1, x2
+
 	// Now the host state is stored if we have a pending RAS SError it must
 	// affect the host. If any asynchronous exception is pending we defer
 	// the guest entry. The DSB isn't necessary before v8.2 as any SError
@@ -83,6 +97,9 @@ alternative_else_nop_endif
 	// when this feature is enabled for kernel code.
 	ptrauth_switch_to_guest x29, x0, x1, x2
 
+	// Restore the guest's sp_el0
+	restore_sp_el0	x29, x0
+
 	// Restore guest regs x0-x17
 	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
 	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
@@ -130,6 +147,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// Store the guest regs x18-x29, lr
 	save_callee_saved_regs x1
 
+	// Store the guest's sp_el0
+	save_sp_el0	x1, x2
+
 	get_host_ctxt	x2, x3
 
 	// Macro ptrauth_switch_to_guest format:
@@ -139,6 +159,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// when this feature is enabled for kernel code.
 	ptrauth_switch_to_host x1, x2, x3, x4, x5
 
+	// Restore the host's sp_el0
+	restore_sp_el0	x2, x3
+
 	// Now restore the host regs
 	restore_callee_saved_regs x2
 
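Why sp_el0 has to be switched this early, in the entry/exit assembly rather than in the C sysreg code: the arm64 kernel stashes the 'current' task pointer in sp_el0, so any host C code that runs after __guest_exit but before a C-side restore would compute 'current' from a guest-controlled value. A simplified sketch of how arm64 derives current, modeled on arch/arm64/include/asm/current.h (shortened here):

/* On arm64, the current task_struct pointer lives in sp_el0. */
static __always_inline struct task_struct *get_current(void)
{
	unsigned long sp_el0;

	asm ("mrs %0, sp_el0" : "=r" (sp_el0));

	return (struct task_struct *)sp_el0;
}

#define current get_current()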
1 change: 0 additions & 1 deletion arch/arm64/kvm/hyp/hyp-entry.S
@@ -198,7 +198,6 @@ SYM_CODE_END(__hyp_panic)
 .macro invalid_vector	label, target = __hyp_panic
 	.align	2
 SYM_CODE_START(\label)
-\label:
 	b \target
 SYM_CODE_END(\label)
 .endm
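This one-line deletion is the Clang fix from the tag description: SYM_CODE_START(\label) already emits the label, so the macro body defined the same symbol twice, which Clang's integrated assembler rejects as a duplicate definition. A rough sketch of the expansion (instantiation name illustrative; the real SYM_CODE_START also emits alignment and linkage directives):

// invalid_vector el2t_sync_invalid expanded roughly to:
el2t_sync_invalid:		// emitted by SYM_CODE_START(\label)
el2t_sync_invalid:		// the removed "\label:" (a second definition)
	b	__hyp_panic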
17 changes: 3 additions & 14 deletions arch/arm64/kvm/hyp/sysreg-sr.c
@@ -15,8 +15,9 @@
 /*
  * Non-VHE: Both host and guest must save everything.
  *
- * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
- * which are handled as part of the el2 return state) on every switch.
+ * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
+ * pstate, which are handled as part of the el2 return state) on every
+ * switch (sp_el0 is being dealt with in the assembly code).
  * tpidr_el0 and tpidrro_el0 only need to be switched when going
  * to host userspace or a different VCPU. EL1 registers only need to be
  * switched when potentially going to run a different VCPU. The latter two
@@ -26,12 +27,6 @@
 static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
-
-	/*
-	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-	 * therefore be saved/restored on every entry/exit to/from the guest.
-	 */
-	ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
 }
 
 static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
@@ -99,12 +94,6 @@ NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
 static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
 	write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
-
-	/*
-	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-	 * therefore be saved/restored on every entry/exit to/from the guest.
-	 */
-	write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
 }
 
 static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
8 changes: 6 additions & 2 deletions virt/kvm/arm/hyp/aarch32.c
@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
  */
 void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
+	u32 pc = *vcpu_pc(vcpu);
 	bool is_thumb;
 
 	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
 	if (is_thumb && !is_wide_instr)
-		*vcpu_pc(vcpu) += 2;
+		pc += 2;
 	else
-		*vcpu_pc(vcpu) += 4;
+		pc += 4;
+
+	*vcpu_pc(vcpu) = pc;
+
 	kvm_adjust_itstate(vcpu);
 }
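The reason for staging the increment in a local u32, shown as a standalone sketch (plain C with a made-up value; vcpu_pc() itself is not modeled here): *vcpu_pc(vcpu) refers to a 64-bit field, so incrementing it in place lets a 32-bit guest's PC step past 4GiB instead of wrapping to zero.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pc64 = 0xfffffffcULL;	/* last word of the AArch32 space */
	uint32_t pc32 = (uint32_t)pc64;

	pc64 += 4;	/* old behaviour: 0x100000000, not a valid AArch32 PC */
	pc32 += 4;	/* fixed behaviour: wraps to 0x0 */

	printf("64-bit increment: %#llx\n", (unsigned long long)pc64);
	printf("32-bit increment: %#x\n", pc32);
	return 0;
}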
9 changes: 8 additions & 1 deletion virt/kvm/arm/vgic/vgic-init.c
@@ -294,8 +294,15 @@ int vgic_init(struct kvm *kvm)
 		}
 	}
 
-	if (vgic_has_its(kvm)) {
+	if (vgic_has_its(kvm))
 		vgic_lpi_translation_cache_init(kvm);
+
+	/*
+	 * If we have GICv4.1 enabled, unconditionally enable the v4
+	 * support so that we get HW-accelerated vSGIs. Otherwise, only
+	 * enable it if we present a virtual ITS to the guest.
+	 */
+	if (vgic_supports_direct_msis(kvm)) {
 		ret = vgic_v4_init(kvm);
 		if (ret)
 			goto out;
3 changes: 2 additions & 1 deletion virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -50,7 +50,8 @@ bool vgic_has_its(struct kvm *kvm)
 
 bool vgic_supports_direct_msis(struct kvm *kvm)
 {
-	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+	return (kvm_vgic_global_state.has_gicv4_1 ||
+		(kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
 }
 
 /*
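Spelled out, the new predicate reads as the following informal truth table (derived from the return expression above): with GICv4.1 the v4 support is requested unconditionally, while plain GICv4 still depends on a virtual ITS being present. That is what lets vgic_init() above call vgic_v4_init() even when no virtual ITS was created.

	has_gicv4_1	has_gicv4	vgic_has_its()	direct MSIs/vSGIs?
	true		-		-		yes
	false		true		true		yes
	false		true		false		no
	false		false		-		no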
