Merge branch 'kvm-amd-fixes' into HEAD
This topic branch will be included in both kvm/master and kvm/next
(for 5.8) in order to simplify testing of kvm/next.
Paolo Bonzini committed May 15, 2020
2 parents 2673cb6 + 3748613 commit f6bfd9c
Showing 12 changed files with 324 additions and 87 deletions.
arch/x86/include/asm/kvm_host.h: 4 changes (2 additions, 2 deletions)
@@ -578,6 +578,7 @@ struct kvm_vcpu_arch {
 	unsigned long cr4;
 	unsigned long cr4_guest_owned_bits;
 	unsigned long cr8;
+	u32 host_pkru;
 	u32 pkru;
 	u32 hflags;
 	u64 efer;
@@ -1093,8 +1094,6 @@ struct kvm_x86_ops {
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
-	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
-	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
 	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
@@ -1449,6 +1448,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
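The new kvm_queue_exception_p() declared above is the payload-carrying variant of kvm_queue_exception(). A minimal sketch of its definition, assuming it simply forwards to the existing kvm_multiple_exception() helper in x86.c (which is not rendered on this page):

/*
 * Sketch, not part of this page's diff: the payload is recorded together
 * with the pending exception and folded into architectural state (DR6 for
 * #DB, CR2 for #PF) by kvm_deliver_exception_payload() just before the
 * exception is injected.
 */
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}

Deferring the write this way means DR6 or CR2 is only touched when the exception is actually delivered to the guest, which matters when a nested hypervisor intercepts the exception instead.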
arch/x86/kvm/hyperv.c: 2 changes (1 addition, 1 deletion)
@@ -1427,7 +1427,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 	 */
 	kvm_make_vcpus_request_mask(kvm,
 				    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
-				    vcpu_mask, &hv_vcpu->tlb_flush);
+				    NULL, vcpu_mask, &hv_vcpu->tlb_flush);
 
 ret_success:
 	/* We always do full TLB flush, set rep_done = rep_cnt. */
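The extra NULL is the new third argument that this merge adds to kvm_make_vcpus_request_mask(): an "except" vCPU to skip when sending a request, introduced for the AMD AVIC fixes. Assuming that signature change (the kvm_main.c side is not rendered on this page), the prototype becomes roughly:

/* Assumed updated prototype: 'except' lets one vCPU be skipped; NULL,
 * as in the hyperv.c caller above, means "kick every vCPU in the mask".
 */
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);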
arch/x86/kvm/svm/nested.c: 39 changes (31 additions, 8 deletions)
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 
 #include <asm/msr-index.h>
+#include <asm/debugreg.h>
 
 #include "kvm_emulate.h"
 #include "trace.h"
@@ -267,7 +268,7 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
 	svm->vmcb->save.rip = nested_vmcb->save.rip;
 	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
-	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
+	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
 	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
 
 	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
@@ -482,7 +483,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	nested_vmcb->save.rsp = vmcb->save.rsp;
 	nested_vmcb->save.rax = vmcb->save.rax;
 	nested_vmcb->save.dr7 = vmcb->save.dr7;
-	nested_vmcb->save.dr6 = vmcb->save.dr6;
+	nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
 	nested_vmcb->save.cpl = vmcb->save.cpl;
 
 	nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
@@ -606,26 +607,45 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 /* DB exceptions for our internal use must not cause vmexit */
 static int nested_svm_intercept_db(struct vcpu_svm *svm)
 {
-	unsigned long dr6;
+	unsigned long dr6 = svm->vmcb->save.dr6;
 
+	/* Always catch it and pass it to userspace if debugging.  */
+	if (svm->vcpu.guest_debug &
+	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+		return NESTED_EXIT_HOST;
+
 	/* if we're not singlestepping, it's not ours */
 	if (!svm->nmi_singlestep)
-		return NESTED_EXIT_DONE;
+		goto reflected_db;
 
 	/* if it's not a singlestep exception, it's not ours */
-	if (kvm_get_dr(&svm->vcpu, 6, &dr6))
-		return NESTED_EXIT_DONE;
 	if (!(dr6 & DR6_BS))
-		return NESTED_EXIT_DONE;
+		goto reflected_db;
 
 	/* if the guest is singlestepping, it should get the vmexit */
 	if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
 		disable_nmi_singlestep(svm);
-		return NESTED_EXIT_DONE;
+		goto reflected_db;
 	}
 
 	/* it's ours, the nested hypervisor must not see this one */
 	return NESTED_EXIT_HOST;
+
+reflected_db:
+	/*
+	 * Synchronize guest DR6 here just like in kvm_deliver_exception_payload;
+	 * it will be moved into the nested VMCB by nested_svm_vmexit.  Once
+	 * exceptions will be moved to svm_check_nested_events, all this stuff
+	 * will just go away and we could just return NESTED_EXIT_HOST
+	 * unconditionally.  db_interception will queue the exception, which
+	 * will be processed by svm_check_nested_events if a nested vmexit is
+	 * required, and we will just use kvm_deliver_exception_payload to copy
+	 * the payload to DR6 before vmexit.
+	 */
+	WARN_ON(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT);
+	svm->vcpu.arch.dr6 &= ~(DR_TRAP_BITS | DR6_RTM);
+	svm->vcpu.arch.dr6 |= dr6 & ~DR6_FIXED_1;
+	return NESTED_EXIT_DONE;
 }
 
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
@@ -682,6 +702,9 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 		if (svm->nested.intercept_exceptions & excp_bits) {
 			if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
 				vmexit = nested_svm_intercept_db(svm);
+			else if (exit_code == SVM_EXIT_EXCP_BASE + BP_VECTOR &&
+				 svm->vcpu.guest_debug & KVM_GUESTDBG_USE_SW_BP)
+				vmexit = NESTED_EXIT_HOST;
 			else
 				vmexit = NESTED_EXIT_DONE;
 		}
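For reference, the kvm_deliver_exception_payload() merge rules that the reflected_db comment mirrors look roughly like the sketch below; this is an assumption about the #DB branch of the x86.c helper, which is not part of this page's diff. Bits 0-3 of DR6 are replaced by the new exception's trap bits, and DR6_RTM, which has inverted polarity, is cleared exactly when the payload has it set.

/* Sketch of the #DB branch of kvm_deliver_exception_payload(); the helper
 * name below is illustrative.
 */
static void deliver_db_payload(struct kvm_vcpu *vcpu, unsigned long payload)
{
	/* Certain debug exceptions may clear bits 0-3; the remaining
	 * contents of DR6 are never cleared by the processor.
	 */
	vcpu->arch.dr6 &= ~DR_TRAP_BITS;
	vcpu->arch.dr6 |= DR6_RTM;
	vcpu->arch.dr6 |= payload;
	/* a set RTM bit in the payload means the #DB cleared DR6.RTM */
	vcpu->arch.dr6 ^= payload & DR6_RTM;
}

nested_svm_intercept_db() above applies the equivalent transformation directly on vcpu->arch.dr6 from the raw hardware DR6, since at that point no exception payload has been queued yet.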
arch/x86/kvm/svm/svm.c: 36 changes (22 additions, 14 deletions)
@@ -1672,17 +1672,14 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 	mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
-static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
+static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
 {
-	return to_svm(vcpu)->vmcb->save.dr6;
-}
-
-static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *vmcb = svm->vmcb;
 
-	svm->vmcb->save.dr6 = value;
-	mark_dirty(svm->vmcb, VMCB_DR);
+	if (unlikely(value != vmcb->save.dr6)) {
+		vmcb->save.dr6 = value;
+		mark_dirty(vmcb, VMCB_DR);
+	}
 }
 
 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
@@ -1693,9 +1690,12 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 	get_debugreg(vcpu->arch.db[1], 1);
 	get_debugreg(vcpu->arch.db[2], 2);
 	get_debugreg(vcpu->arch.db[3], 3);
-	vcpu->arch.dr6 = svm_get_dr6(vcpu);
+	/*
+	 * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
+	 * because db_interception might need it.  We can do it before vmentry.
+	 */
+	vcpu->arch.dr6 = svm->vmcb->save.dr6;
 	vcpu->arch.dr7 = svm->vmcb->save.dr7;
-
 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
 	set_dr_intercepts(svm);
 }
@@ -1739,7 +1739,8 @@ static int db_interception(struct vcpu_svm *svm)
 	if (!(svm->vcpu.guest_debug &
 	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
 		!svm->nmi_singlestep) {
-		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
+		u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
+		kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
 		return 1;
 	}
 
@@ -3317,6 +3318,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
+	/*
+	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
+	 * of a #DB.
+	 */
+	if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+		svm_set_dr6(svm, vcpu->arch.dr6);
+	else
+		svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
+
 	clgi();
 	kvm_load_guest_xsave_state(vcpu);
 
@@ -3931,8 +3941,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.set_idt = svm_set_idt,
 	.get_gdt = svm_get_gdt,
 	.set_gdt = svm_set_gdt,
-	.get_dr6 = svm_get_dr6,
-	.set_dr6 = svm_set_dr6,
 	.set_dr7 = svm_set_dr7,
 	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
 	.cache_reg = svm_cache_reg,
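The payload computed in db_interception() above, (dr6 ^ DR6_RTM) & ~DR6_FIXED_1, is built to survive the delivery-side merge exactly. A small userspace check of that round trip (constants copied from the kernel headers; the encode/deliver helpers are illustrative, not kernel code):

#include <assert.h>

#define DR_TRAP_BITS	0x0000000fUL	/* DR6.B0-B3 */
#define DR6_BS		0x00004000UL	/* single-step trap */
#define DR6_RTM		0x00010000UL	/* inverted-polarity RTM bit */
#define DR6_FIXED_1	0xfffe0ff0UL	/* bits that always read as 1 */
#define DR6_INIT	(DR6_FIXED_1 | DR6_RTM)

/* what db_interception() queues: strip fixed bits, flip RTM polarity */
static unsigned long encode(unsigned long hw_dr6)
{
	return (hw_dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
}

/* the delivery-side merge, as sketched after the nested.c diff above */
static unsigned long deliver(unsigned long dr6, unsigned long payload)
{
	dr6 &= ~DR_TRAP_BITS;
	dr6 |= DR6_RTM;
	dr6 |= payload;
	dr6 ^= payload & DR6_RTM;
	return dr6;
}

int main(void)
{
	unsigned long outside_rtm = DR6_FIXED_1 | DR6_RTM | DR6_BS;
	unsigned long inside_rtm = DR6_FIXED_1 | DR6_BS;

	/* both polarities of DR6.RTM reconstruct the hardware value */
	assert(deliver(DR6_INIT, encode(outside_rtm)) == outside_rtm);
	assert(deliver(DR6_INIT, encode(inside_rtm)) == inside_rtm);
	return 0;
}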
arch/x86/kvm/vmx/vmx.c: 41 changes (4 additions, 37 deletions)
@@ -1372,7 +1372,6 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	vmx_vcpu_pi_load(vcpu, cpu);
 
-	vmx->host_pkru = read_pkru();
 	vmx->host_debugctlmsr = get_debugctlmsr();
 }
 
@@ -4677,15 +4676,13 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 		dr6 = vmcs_readl(EXIT_QUALIFICATION);
 		if (!(vcpu->guest_debug &
 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
-			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-			vcpu->arch.dr6 |= dr6 | DR6_RTM;
 			if (is_icebp(intr_info))
 				WARN_ON(!skip_emulated_instruction(vcpu));
 
-			kvm_queue_exception(vcpu, DB_VECTOR);
+			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
 			return 1;
 		}
-		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
+		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
 		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
 		/* fall through */
 	case BP_VECTOR:
@@ -4929,16 +4926,14 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 		 * guest debugging itself.
 		 */
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
-			vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
+			vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1;
 			vcpu->run->debug.arch.dr7 = dr7;
 			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
 			vcpu->run->debug.arch.exception = DB_VECTOR;
 			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
 			return 0;
 		} else {
-			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-			vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
-			kvm_queue_exception(vcpu, DB_VECTOR);
+			kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
 			return 1;
 		}
 	}
@@ -4969,15 +4964,6 @@
 	return kvm_skip_emulated_instruction(vcpu);
 }
 
-static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.dr6;
-}
-
-static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
-{
-}
-
 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 {
 	get_debugreg(vcpu->arch.db[0], 0);
@@ -6577,11 +6563,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	kvm_load_guest_xsave_state(vcpu);
 
-	if (static_cpu_has(X86_FEATURE_PKU) &&
-	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
-	    vcpu->arch.pkru != vmx->host_pkru)
-		__write_pkru(vcpu->arch.pkru);
-
 	pt_guest_enter(vmx);
 
 	if (vcpu_to_pmu(vcpu)->version)
@@ -6671,18 +6652,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	pt_guest_exit(vmx);
 
-	/*
-	 * eager fpu is enabled if PKEY is supported and CR4 is switched
-	 * back on host, so it is safe to read guest PKRU from current
-	 * XSAVE.
-	 */
-	if (static_cpu_has(X86_FEATURE_PKU) &&
-	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
-		vcpu->arch.pkru = rdpkru();
-		if (vcpu->arch.pkru != vmx->host_pkru)
-			__write_pkru(vmx->host_pkru);
-	}
-
 	kvm_load_host_xsave_state(vcpu);
 
 	vmx->nested.nested_run_pending = 0;
@@ -7740,8 +7709,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
 	.set_gdt = vmx_set_gdt,
-	.get_dr6 = vmx_get_dr6,
-	.set_dr6 = vmx_set_dr6,
 	.set_dr7 = vmx_set_dr7,
 	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
 	.cache_reg = vmx_cache_reg,
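The PKRU switching deleted from vmx_vcpu_run() and vmx_vcpu_load() is not dropped: this merge hoists it into the common kvm_load_guest_xsave_state()/kvm_load_host_xsave_state() helpers in x86.c (one of the files not rendered on this page), keyed off the new vcpu->arch.host_pkru field added in kvm_host.h above. A sketch of that destination, from memory of the corresponding patch and worth checking against the full diff; it also carries the fix that PKRU is now switched when the guest enables the feature through XCR0 even with CR4.PKE clear:

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
	/* ... existing XCR0/XSS switching (assumed, not shown here) ... */
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
	    vcpu->arch.pkru != vcpu->arch.host_pkru)
		__write_pkru(vcpu->arch.pkru);
}

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
		vcpu->arch.pkru = rdpkru();
		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
			__write_pkru(vcpu->arch.host_pkru);
	}
	/* ... existing XCR0/XSS restore (assumed, not shown here) ... */
}

vcpu->arch.host_pkru itself would be snapshotted in common vCPU-load code, replacing the vmx->host_pkru read removed from vmx_vcpu_load() above.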
(The remaining 7 of the 12 changed files are not rendered on this page.)
