KVM: PPC: Book3S HV P9: Reduce mtmsrd instructions required to save host SPRs

This reduces the number of mtmsrd instructions required to enable facility
bits when saving/restoring registers, by having the KVM code set all required
bits up front rather than using individual facility functions that each set
their particular MSR bits.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-20-npiggin@gmail.com
Nicholas Piggin authored and Michael Ellerman committed Nov 24, 2021
1 parent 174a3ab commit 34e119c
Showing 4 changed files with 71 additions and 19 deletions.
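To make the pattern concrete before the diffs, here is a minimal sketch (an
editorial illustration, not code from this commit): compute the union of all
facility bits the entry path will need, then enable them with a single
msr_check_and_set() call, which issues at most one mtmsrd and returns the
resulting MSR for reuse. All identifiers below (msr_check_and_set,
cpu_has_feature, the MSR_* and CPU_FTR_* flags) are existing kernel symbols
that appear in the hunks that follow.

    /* Build the full set of facility bits, then enable them in one shot. */
    unsigned long msr = 0;

    if (IS_ENABLED(CONFIG_PPC_FPU))
            msr |= MSR_FP;
    if (cpu_has_feature(CPU_FTR_ALTIVEC))
            msr |= MSR_VEC;
    if (cpu_has_feature(CPU_FTR_VSX))
            msr |= MSR_VSX;
    if (cpu_has_feature(CPU_FTR_TM) ||
        cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
            msr |= MSR_TM;

    /* At most one mtmsrd; the return value caches the live MSR. */
    msr = msr_check_and_set(msr);

The subsequent register save/restore helpers can then run without touching
the MSR again, where previously each facility path performed its own update.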
2 changes: 2 additions & 0 deletions arch/powerpc/include/asm/switch_to.h
@@ -112,6 +112,8 @@ static inline void clear_task_ebb(struct task_struct *t)
 #endif
 }
 
+void kvmppc_save_user_regs(void);
+
 extern int set_thread_tidr(struct task_struct *t);
 
 #endif /* _ASM_POWERPC_SWITCH_TO_H */
28 changes: 28 additions & 0 deletions arch/powerpc/kernel/process.c
@@ -1156,6 +1156,34 @@ static inline void save_sprs(struct thread_struct *t)
 #endif
 }
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void kvmppc_save_user_regs(void)
+{
+	unsigned long usermsr;
+
+	if (!current->thread.regs)
+		return;
+
+	usermsr = current->thread.regs->msr;
+
+	if (usermsr & MSR_FP)
+		save_fpu(current);
+
+	if (usermsr & MSR_VEC)
+		save_altivec(current);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (usermsr & MSR_TM) {
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+}
+EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
 static inline void restore_sprs(struct thread_struct *old_thread,
 				struct thread_struct *new_thread)
 {
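Note that kvmppc_save_user_regs() relies on its caller having already enabled
the relevant facilities in the host MSR: save_fpu() and save_altivec() execute
FP/VMX store instructions, and the TM SPRs (TFHAR/TFIAR/TEXASR) can only be
read with MSR_TM set, which is why the old inline code did
mtmsr(mfmsr() | MSR_TM) first. A sketch of the required caller ordering,
mirroring the book3s_hv.c hunk below:

    /* Facility bits must be set before the saves; one mtmsrd covers all. */
    msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX | MSR_TM);
    kvmppc_save_user_regs();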
59 changes: 40 additions & 19 deletions arch/powerpc/kvm/book3s_hv.c
@@ -4153,6 +4153,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	struct p9_host_os_sprs host_os_sprs;
 	s64 dec;
 	u64 tb, next_timer;
+	unsigned long msr;
 	int trap;
 
 	WARN_ON_ONCE(vcpu->arch.ceded);
@@ -4164,8 +4165,23 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (next_timer < time_limit)
 		time_limit = next_timer;
 
+	vcpu->arch.ceded = 0;
+
 	save_p9_host_os_sprs(&host_os_sprs);
 
+	/* MSR bits may have been cleared by context switch */
+	msr = 0;
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr |= MSR_VSX;
+	if (cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+		msr |= MSR_TM;
+	msr = msr_check_and_set(msr);
+
 	kvmppc_subcore_enter_guest();
 
 	vc->entry_exit_map = 1;
@@ -4174,12 +4190,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	vcpu_vpa_increment_dispatch(vcpu);
 
 	if (cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
 		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+		msr = mfmsr(); /* TM restore can update msr */
+	}
 
 	switch_pmu_to_guest(vcpu, &host_os_sprs);
 
-	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	load_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
 	load_vr_state(&vcpu->arch.vr);
@@ -4288,7 +4305,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
 	store_vr_state(&vcpu->arch.vr);
@@ -4851,32 +4867,31 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	unsigned long user_tar = 0;
 	unsigned int user_vrsave;
 	struct kvm *kvm;
+	unsigned long msr;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/* No need to go into the guest when all we'll do is come back out */
+	if (signal_pending(current)) {
+		run->exit_reason = KVM_EXIT_INTR;
+		return -EINTR;
+	}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	/*
 	 * Don't allow entry with a suspended transaction, because
 	 * the guest entry/exit code will lose it.
-	 * If the guest has TM enabled, save away their TM-related SPRs
-	 * (they will get restored by the TM unavailable interrupt).
 	 */
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
 	    (current->thread.regs->msr & MSR_TM)) {
 		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
 			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 			run->fail_entry.hardware_entry_failure_reason = 0;
 			return -EINVAL;
 		}
-		/* Enable TM so we can read the TM SPRs */
-		mtmsr(mfmsr() | MSR_TM);
-		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
-		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
-		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
-		current->thread.regs->msr &= ~MSR_TM;
 	}
 #endif
 
@@ -4891,18 +4906,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 
 	kvmppc_core_prepare_to_enter(vcpu);
 
-	/* No need to go into the guest when all we'll do is come back out */
-	if (signal_pending(current)) {
-		run->exit_reason = KVM_EXIT_INTR;
-		return -EINTR;
-	}
-
 	kvm = vcpu->kvm;
 	atomic_inc(&kvm->arch.vcpus_running);
 	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	flush_all_to_thread(current);
+	msr = 0;
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr |= MSR_VSX;
+	if (cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+		msr |= MSR_TM;
+	msr = msr_check_and_set(msr);
+
+	kvmppc_save_user_regs();
 
 	/* Save userspace EBB and other register values */
 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1 change: 1 addition & 0 deletions arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -224,6 +224,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 		vc->tb_offset_applied = vc->tb_offset;
 	}
 
+	/* Could avoid mfmsr by passing around, but probably no big deal */
 	msr = mfmsr();
 
 	host_hfscr = mfspr(SPRN_HFSCR);
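The new comment leaves a possible follow-up on the table. A hypothetical
variant (the extra parameter is invented here for illustration and is not
part of this commit) would thread the caller's cached MSR value through
instead of re-reading it:

    /* Hypothetical: reuse the value the caller got from msr_check_and_set(). */
    int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit,
                            unsigned long lpcr, unsigned long host_msr);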
