KVM: x86: pass host_initiated to functions that read MSRs
SMBASE is only readable from SMM for the VCPU, but it must always be
accessible if userspace is accessing it.  Thus, all functions that
read MSRs are changed to accept a struct msr_data; the host_initiated
and index fields are pre-initialized, while the data field is filled
on return.

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini committed Jun 4, 2015
1 parent 62ef68b commit 609e36d
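
For reference, the descriptor that every get_msr hook now receives is the
same struct msr_data the set_msr side already uses, declared in
arch/x86/include/asm/kvm_host.h:

	struct msr_data {
		bool host_initiated;	/* true for userspace (ioctl) accesses */
		u32 index;		/* MSR number, pre-initialized by the caller */
		u64 data;		/* value, filled in on a successful read */
	};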
Showing 4 changed files with 127 additions and 101 deletions.
6 changes: 3 additions & 3 deletions arch/x86/include/asm/kvm_host.h
@@ -721,7 +721,7 @@ struct kvm_x86_ops {
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
 
 	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
@@ -941,7 +941,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
+int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 struct x86_emulate_ctxt;
@@ -970,7 +970,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
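
The prototype changes above mean every caller must now say whether an
access is host-initiated. On the userspace side (the KVM_GET_MSRS ioctl
path in arch/x86/kvm/x86.c, whose hunks did not render on this page),
the read wrapper is expected to take roughly this shape — a sketch
under that assumption, with the helper name assumed rather than quoted
from the patch:

	/* Sketch: host-initiated read; SMM-only MSRs stay readable here. */
	static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
	{
		struct msr_data msr;
		int r;

		msr.index = index;
		msr.host_initiated = true;
		r = kvm_get_msr(vcpu, &msr);
		if (!r)
			*data = msr.data;
		return r;
	}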
54 changes: 29 additions & 25 deletions arch/x86/kvm/svm.c
@@ -3069,91 +3069,95 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 		svm_scale_tsc(vcpu, host_tsc);
 }
 
-static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
+static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	switch (ecx) {
+	switch (msr_info->index) {
 	case MSR_IA32_TSC: {
-		*data = svm->vmcb->control.tsc_offset +
+		msr_info->data = svm->vmcb->control.tsc_offset +
 			svm_scale_tsc(vcpu, native_read_tsc());
 
 		break;
 	}
 	case MSR_STAR:
-		*data = svm->vmcb->save.star;
+		msr_info->data = svm->vmcb->save.star;
 		break;
 #ifdef CONFIG_X86_64
 	case MSR_LSTAR:
-		*data = svm->vmcb->save.lstar;
+		msr_info->data = svm->vmcb->save.lstar;
 		break;
 	case MSR_CSTAR:
-		*data = svm->vmcb->save.cstar;
+		msr_info->data = svm->vmcb->save.cstar;
 		break;
 	case MSR_KERNEL_GS_BASE:
-		*data = svm->vmcb->save.kernel_gs_base;
+		msr_info->data = svm->vmcb->save.kernel_gs_base;
 		break;
 	case MSR_SYSCALL_MASK:
-		*data = svm->vmcb->save.sfmask;
+		msr_info->data = svm->vmcb->save.sfmask;
 		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
-		*data = svm->vmcb->save.sysenter_cs;
+		msr_info->data = svm->vmcb->save.sysenter_cs;
 		break;
 	case MSR_IA32_SYSENTER_EIP:
-		*data = svm->sysenter_eip;
+		msr_info->data = svm->sysenter_eip;
 		break;
 	case MSR_IA32_SYSENTER_ESP:
-		*data = svm->sysenter_esp;
+		msr_info->data = svm->sysenter_esp;
 		break;
 	/*
 	 * Nobody will change the following 5 values in the VMCB so we can
 	 * safely return them on rdmsr. They will always be 0 until LBRV is
 	 * implemented.
	 */
 	case MSR_IA32_DEBUGCTLMSR:
-		*data = svm->vmcb->save.dbgctl;
+		msr_info->data = svm->vmcb->save.dbgctl;
 		break;
 	case MSR_IA32_LASTBRANCHFROMIP:
-		*data = svm->vmcb->save.br_from;
+		msr_info->data = svm->vmcb->save.br_from;
 		break;
 	case MSR_IA32_LASTBRANCHTOIP:
-		*data = svm->vmcb->save.br_to;
+		msr_info->data = svm->vmcb->save.br_to;
 		break;
 	case MSR_IA32_LASTINTFROMIP:
-		*data = svm->vmcb->save.last_excp_from;
+		msr_info->data = svm->vmcb->save.last_excp_from;
 		break;
 	case MSR_IA32_LASTINTTOIP:
-		*data = svm->vmcb->save.last_excp_to;
+		msr_info->data = svm->vmcb->save.last_excp_to;
 		break;
 	case MSR_VM_HSAVE_PA:
-		*data = svm->nested.hsave_msr;
+		msr_info->data = svm->nested.hsave_msr;
 		break;
 	case MSR_VM_CR:
-		*data = svm->nested.vm_cr_msr;
+		msr_info->data = svm->nested.vm_cr_msr;
 		break;
 	case MSR_IA32_UCODE_REV:
-		*data = 0x01000065;
+		msr_info->data = 0x01000065;
 		break;
 	default:
-		return kvm_get_msr_common(vcpu, ecx, data);
+		return kvm_get_msr_common(vcpu, msr_info);
 	}
 	return 0;
 }
 
 static int rdmsr_interception(struct vcpu_svm *svm)
 {
 	u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
-	u64 data;
+	struct msr_data msr_info;
 
-	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
+	msr_info.index = ecx;
+	msr_info.host_initiated = false;
+	if (svm_get_msr(&svm->vcpu, &msr_info)) {
 		trace_kvm_msr_read_ex(ecx);
 		kvm_inject_gp(&svm->vcpu, 0);
 	} else {
-		trace_kvm_msr_read(ecx, data);
+		trace_kvm_msr_read(ecx, msr_info.data);
 
-		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
-		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
+		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
+				   msr_info.data & 0xffffffff);
+		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
+				   msr_info.data >> 32);
 		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
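
Note that rdmsr_interception() above — like handle_rdmsr() in vmx.c
below — sets host_initiated = false, so common code can tell a guest
rdmsr from a userspace ioctl. The consumer this enables lands later in
the SMM series; in kvm_get_msr_common() it would look something like
the following sketch (assuming an is_smm() helper, per the commit
message's rule that the guest may read SMBASE only from SMM):

	case MSR_IA32_SMBASE:
		/* Sketch: #GP on guest reads outside SMM; userspace always succeeds. */
		if (!msr_info->host_initiated && !is_smm(vcpu))
			return 1;
		msr_info->data = vcpu->arch.smbase;
		break;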
62 changes: 30 additions & 32 deletions arch/x86/kvm/vmx.c
@@ -2622,76 +2622,69 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	u64 data;
 	struct shared_msr_entry *msr;
 
-	if (!pdata) {
-		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
-		return -EINVAL;
-	}
-
-	switch (msr_index) {
+	switch (msr_info->index) {
 #ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
-		data = vmcs_readl(GUEST_FS_BASE);
+		msr_info->data = vmcs_readl(GUEST_FS_BASE);
 		break;
 	case MSR_GS_BASE:
-		data = vmcs_readl(GUEST_GS_BASE);
+		msr_info->data = vmcs_readl(GUEST_GS_BASE);
 		break;
 	case MSR_KERNEL_GS_BASE:
 		vmx_load_host_state(to_vmx(vcpu));
-		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+		msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
 		break;
 #endif
 	case MSR_EFER:
-		return kvm_get_msr_common(vcpu, msr_index, pdata);
+		return kvm_get_msr_common(vcpu, msr_info);
 	case MSR_IA32_TSC:
-		data = guest_read_tsc();
+		msr_info->data = guest_read_tsc();
 		break;
 	case MSR_IA32_SYSENTER_CS:
-		data = vmcs_read32(GUEST_SYSENTER_CS);
+		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
 		break;
 	case MSR_IA32_SYSENTER_EIP:
-		data = vmcs_readl(GUEST_SYSENTER_EIP);
+		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
 		break;
 	case MSR_IA32_SYSENTER_ESP:
-		data = vmcs_readl(GUEST_SYSENTER_ESP);
+		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
 		if (!vmx_mpx_supported())
 			return 1;
-		data = vmcs_read64(GUEST_BNDCFGS);
+		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
 	case MSR_IA32_FEATURE_CONTROL:
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
-		data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+		msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
 		break;
 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
-		return vmx_get_vmx_msr(vcpu, msr_index, pdata);
+		return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
 	case MSR_IA32_XSS:
 		if (!vmx_xsaves_supported())
 			return 1;
-		data = vcpu->arch.ia32_xss;
+		msr_info->data = vcpu->arch.ia32_xss;
 		break;
 	case MSR_TSC_AUX:
 		if (!to_vmx(vcpu)->rdtscp_enabled)
 			return 1;
 		/* Otherwise falls through */
 	default:
-		msr = find_msr_entry(to_vmx(vcpu), msr_index);
+		msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
 		if (msr) {
-			data = msr->data;
+			msr_info->data = msr->data;
 			break;
 		}
-		return kvm_get_msr_common(vcpu, msr_index, pdata);
+		return kvm_get_msr_common(vcpu, msr_info);
 	}
 
-	*pdata = data;
 	return 0;
 }
 
@@ -5473,19 +5466,21 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
 static int handle_rdmsr(struct kvm_vcpu *vcpu)
 {
 	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
-	u64 data;
+	struct msr_data msr_info;
 
-	if (vmx_get_msr(vcpu, ecx, &data)) {
+	msr_info.index = ecx;
+	msr_info.host_initiated = false;
+	if (vmx_get_msr(vcpu, &msr_info)) {
 		trace_kvm_msr_read_ex(ecx);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 
-	trace_kvm_msr_read(ecx, data);
+	trace_kvm_msr_read(ecx, msr_info.data);
 
 	/* FIXME: handling of bits 32:63 of rax, rdx */
-	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
-	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
+	vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
+	vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
@@ -9147,6 +9142,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	struct vmx_msr_entry e;
 
 	for (i = 0; i < count; i++) {
+		struct msr_data msr_info;
 		if (kvm_read_guest(vcpu->kvm,
 				   gpa + i * sizeof(e),
 				   &e, 2 * sizeof(u32))) {
@@ -9161,7 +9157,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 				__func__, i, e.index, e.reserved);
 			return -EINVAL;
 		}
-		if (kvm_get_msr(vcpu, e.index, &e.value)) {
+		msr_info.host_initiated = false;
+		msr_info.index = e.index;
+		if (kvm_get_msr(vcpu, &msr_info)) {
 			pr_warn_ratelimited(
 				"%s cannot read MSR (%u, 0x%x)\n",
 				__func__, i, e.index);
@@ -9170,10 +9168,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 		if (kvm_write_guest(vcpu->kvm,
 				    gpa + i * sizeof(e) +
 				    offsetof(struct vmx_msr_entry, value),
-				    &e.value, sizeof(e.value))) {
+				    &msr_info.data, sizeof(msr_info.data))) {
 			pr_warn_ratelimited(
 				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
-				__func__, i, e.index, e.value);
+				__func__, i, e.index, msr_info.data);
 			return -EINVAL;
 		}
 	}
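
The fourth changed file, arch/x86/kvm/x86.c, did not load on this page.
Given the header changes above, its part of the conversion has to be
the forwarding layer plus the guest-initiated emulator path; a sketch
of the expected shape, not the verbatim hunks:

	int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
	{
		return kvm_x86_ops->get_msr(vcpu, msr);
	}

	/* Emulated rdmsr is a guest access, so host_initiated is false. */
	static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
				    u32 msr_index, u64 *pdata)
	{
		struct msr_data msr;
		int r;

		msr.index = msr_index;
		msr.host_initiated = false;
		r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
		if (!r)
			*pdata = msr.data;
		return r;
	}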