x86/KVM/VMX: Split the VMX MSR LOAD structures to have host/guest numbers

There is no semantic change, but this change allows an unbalanced number of
MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to save or
restore on VMEXIT or VMENTER may differ.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Konrad Rzeszutek Wilk authored and Thomas Gleixner committed Jul 4, 2018
1 parent c595cee commit 33966dd
Showing 1 changed file with 35 additions and 30 deletions.
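
To illustrate the split described above, here is a minimal user-space sketch
(not part of the patch; the stand-alone scaffolding and the sample MSR index
values are invented for illustration, while vmx_msrs, vmx_msr_entry and
NR_AUTOLOAD_MSRS mirror the kernel definitions). With the old single shared
count, the VMENTER and VMEXIT lists were forced to the same length; with a
per-list count, a guest-only entry needs no host counterpart:

/*
 * Sketch: per-list counts allow the VMENTER and VMEXIT MSR load
 * lists to diverge. Compile with: cc -o demo demo.c
 */
#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8

struct vmx_msr_entry {
	unsigned int index;
	unsigned long long value;
};

struct vmx_msrs {
	unsigned int nr;			/* count for this list only */
	struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};

struct msr_autoload {
	struct vmx_msrs guest;			/* loaded on VMENTER */
	struct vmx_msrs host;			/* loaded on VMEXIT */
};

int main(void)
{
	struct msr_autoload m = { 0 };

	/* A balanced guest/host pair, as before the split. */
	m.guest.val[m.guest.nr++] = (struct vmx_msr_entry){ 0x48, 1 };
	m.host.val[m.host.nr++]   = (struct vmx_msr_entry){ 0x48, 0 };

	/* Now possible: a guest-only entry with no VMEXIT counterpart. */
	m.guest.val[m.guest.nr++] = (struct vmx_msr_entry){ 0x10b, 1 };

	printf("VMENTER loads %u MSRs, VMEXIT loads %u MSRs\n",
	       m.guest.nr, m.host.nr);
	return 0;
}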
arch/x86/kvm/vmx.c
@@ -808,6 +808,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
 		(unsigned long *)&pi_desc->control);
 }
 
+struct vmx_msrs {
+	unsigned int		nr;
+	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];
+};
+
 struct vcpu_vmx {
 	struct kvm_vcpu       vcpu;
 	unsigned long         host_rsp;
@@ -841,9 +846,8 @@ struct vcpu_vmx {
 	struct loaded_vmcs   *loaded_vmcs;
 	bool                  __launched; /* temporary, used in vmx_vcpu_run */
 	struct msr_autoload {
-		unsigned nr;
-		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+		struct vmx_msrs guest;
+		struct vmx_msrs host;
 	} msr_autoload;
 	struct {
 		int loaded;
@@ -2440,18 +2444,18 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 		}
 		break;
 	}
 
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
-	if (i == m->nr)
+	if (i == m->guest.nr)
 		return;
-	--m->nr;
-	m->guest[i] = m->guest[m->nr];
-	m->host[i] = m->host[m->nr];
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	--m->guest.nr;
+	--m->host.nr;
+	m->guest.val[i] = m->guest.val[m->guest.nr];
+	m->host.val[i] = m->host.val[m->host.nr];
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -2503,24 +2507,25 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
 	if (i == NR_AUTOLOAD_MSRS) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
 		return;
-	} else if (i == m->nr) {
-		++m->nr;
-		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	} else if (i == m->guest.nr) {
+		++m->guest.nr;
+		++m->host.nr;
+		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 	}
 
-	m->guest[i].index = msr;
-	m->guest[i].value = guest_val;
-	m->host[i].index = msr;
-	m->host[i].value = host_val;
+	m->guest.val[i].index = msr;
+	m->guest.val[i].value = guest_val;
+	m->host.val[i].index = msr;
+	m->host.val[i].value = host_val;
 }
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
@@ -6290,9 +6295,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
@@ -11350,10 +11355,10 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * Set the MSR load/store lists to match L0's settings.
 	 */
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	set_cr4_guest_host_mask(vmx);
 
@@ -12457,8 +12462,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	vmx_segment_cache_clear(vmx);
 
 	/* Update any VMCS fields that might have changed while L2 ran */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (vmx->hv_deadline_tsc == -1)
 		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
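
For context, a hedged sketch of the kind of asymmetric use this split permits.
The helper name add_atomic_switch_msr_entry_only is hypothetical and not
introduced by this patch; it reuses only the types and VMCS fields shown in
the diff above:

/*
 * Hypothetical helper (not in this patch): append a guest-only entry,
 * bumping only the VMENTER count. With the old shared 'nr' this was
 * impossible, since both VMCS counts were written from the same field.
 */
static void add_atomic_switch_msr_entry_only(struct vcpu_vmx *vmx,
					     unsigned msr, u64 guest_val)
{
	struct msr_autoload *m = &vmx->msr_autoload;

	if (m->guest.nr == NR_AUTOLOAD_MSRS)
		return;
	m->guest.val[m->guest.nr].index = msr;
	m->guest.val[m->guest.nr].value = guest_val;
	++m->guest.nr;
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
	/* VM_EXIT_MSR_LOAD_COUNT stays at m->host.nr. */
}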
