Skip to content

Commit

Permalink
KVM: VMX: Move common fields of struct vcpu_{vmx,tdx} to a struct
Browse files Browse the repository at this point in the history
Move common fields of struct vcpu_vmx and struct vcpu_tdx to struct
vcpu_vt, to share the code between VMX/TDX as much as possible and to make
TDX exit handling more VMX like.

No functional change intended.

[Adrian: move code that depends on struct vcpu_vmx back to vmx.h]

Suggested-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/Z1suNzg2Or743a7e@google.com
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Message-ID: <20250129095902.16391-5-adrian.hunter@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
  • Loading branch information
Binbin Wu authored and Paolo Bonzini committed Mar 14, 2025
1 parent 69e23fa commit 7172c75
Show file tree
Hide file tree
Showing 7 changed files with 176 additions and 136 deletions.
69 changes: 69 additions & 0 deletions arch/x86/kvm/vmx/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,78 @@
#define __KVM_X86_VMX_COMMON_H

#include <linux/kvm_host.h>
#include <asm/posted_intr.h>

#include "mmu.h"

/*
 * Decoded view of the 32-bit VM-Exit reason value.  "basic" holds the
 * basic exit reason in bits 15:0; the named single-bit fields cover the
 * flag bits in the upper half, with the remaining bits kept as
 * explicitly reserved placeholders so the layout stays 32 bits wide.
 * "full" aliases the raw 32-bit value for bulk reads/writes.
 */
union vmx_exit_reason {
	struct {
		u32 basic		: 16;	/* basic exit reason (bits 15:0) */
		u32 reserved16		: 1;
		u32 reserved17		: 1;
		u32 reserved18		: 1;
		u32 reserved19		: 1;
		u32 reserved20		: 1;
		u32 reserved21		: 1;
		u32 reserved22		: 1;
		u32 reserved23		: 1;
		u32 reserved24		: 1;
		u32 reserved25		: 1;
		u32 bus_lock_detected	: 1;
		u32 enclave_mode	: 1;	/* exit occurred from SGX enclave mode */
		u32 smi_pending_mtf	: 1;
		u32 smi_from_vmx_root	: 1;
		u32 reserved30		: 1;
		u32 failed_vmentry	: 1;	/* set when VM-Entry itself failed */
	};
	u32 full;				/* raw 32-bit exit reason */
};

/*
 * Per-vCPU state common to VMX and TDX.  Both struct vcpu_vmx and
 * struct vcpu_tdx embed a struct vcpu_vt so that code shared between
 * the two (e.g. posted-interrupt and exit handling) can operate on
 * either container through the common fields.
 */
struct vcpu_vt {
	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	/* Exit reason of the current VM-Exit (see union above). */
	union vmx_exit_reason exit_reason;

	/* Exit qualification and interruption info for the current exit. */
	unsigned long	exit_qualification;
	u32		exit_intr_info;

	/*
	 * If true, guest state has been loaded into hardware, and host state
	 * saved into vcpu_{vt,vmx,tdx}. If false, host state is loaded into
	 * hardware.
	 */
	bool guest_state_loaded;

#ifdef CONFIG_X86_64
	/* Host's MSR_KERNEL_GS_BASE, saved while guest state is loaded. */
	u64 msr_host_kernel_gs_base;
#endif

	/* Host DEBUGCTL MSR value; presumably restored on exit — confirm at call sites. */
	unsigned long host_debugctlmsr;
};

#ifdef CONFIG_KVM_INTEL_TDX

/* Return true if @kvm is a TDX guest (vm_type == KVM_X86_TDX_VM). */
static __always_inline bool is_td(struct kvm *kvm)
{
	return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

/* Return true if @vcpu belongs to a TDX guest. */
static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
{
	return is_td(vcpu->kvm);
}

#else

/* Without TDX support compiled in, no VM is ever a TD. */
static inline bool is_td(struct kvm *kvm) { return false; }
static inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }

#endif

static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa)
{
/* For TDX the direct mask is the shared mask. */
Expand Down
4 changes: 4 additions & 0 deletions arch/x86/kvm/vmx/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@
#include "tdx.h"
#include "tdx_arch.h"

#ifdef CONFIG_KVM_INTEL_TDX
/*
 * Guarantee the embedded struct vcpu_vt lives at the same offset in
 * both containers, presumably so shared helpers (e.g. to_vt()) can
 * locate it without knowing whether the vCPU is VMX or TDX — confirm
 * against the to_vt() implementation.
 */
static_assert(offsetof(struct vcpu_vmx, vt) == offsetof(struct vcpu_tdx, vt));
#endif

static void vt_disable_virtualization_cpu(void)
{
/* Note, TDX *and* VMX need to be disabled if TDX is enabled. */
Expand Down
10 changes: 5 additions & 5 deletions arch/x86/kvm/vmx/nested.c
Original file line number Diff line number Diff line change
Expand Up @@ -275,7 +275,7 @@ static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
{
struct vmcs_host_state *dest, *src;

if (unlikely(!vmx->guest_state_loaded))
if (unlikely(!vmx->vt.guest_state_loaded))
return;

src = &prev->host_state;
Expand Down Expand Up @@ -425,7 +425,7 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
* tables also changed, but KVM should not treat EPT Misconfig
* VM-Exits as writes.
*/
WARN_ON_ONCE(vmx->exit_reason.basic != EXIT_REASON_EPT_VIOLATION);
WARN_ON_ONCE(vmx->vt.exit_reason.basic != EXIT_REASON_EPT_VIOLATION);

/*
* PML Full and EPT Violation VM-Exits both use bit 12 to report
Expand Down Expand Up @@ -4622,7 +4622,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
{
/* update exit information fields: */
vmcs12->vm_exit_reason = vm_exit_reason;
if (to_vmx(vcpu)->exit_reason.enclave_mode)
if (vmx_get_exit_reason(vcpu).enclave_mode)
vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
vmcs12->exit_qualification = exit_qualification;

Expand Down Expand Up @@ -6126,7 +6126,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
* nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
* EXIT_REASON_VMFUNC as the exit reason.
*/
nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
nested_vmx_vmexit(vcpu, vmx->vt.exit_reason.full,
vmx_get_intr_info(vcpu),
vmx_get_exit_qual(vcpu));
return 1;
Expand Down Expand Up @@ -6571,7 +6571,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
union vmx_exit_reason exit_reason = vmx->exit_reason;
union vmx_exit_reason exit_reason = vmx->vt.exit_reason;
unsigned long exit_qual;
u32 exit_intr_info;

Expand Down
18 changes: 9 additions & 9 deletions arch/x86/kvm/vmx/posted_intr.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ static DEFINE_PER_CPU(raw_spinlock_t, wakeup_vcpus_on_cpu_lock);

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
return &(to_vmx(vcpu)->pi_desc);
return &(to_vt(vcpu)->pi_desc);
}

static int pi_try_set_control(struct pi_desc *pi_desc, u64 *pold, u64 new)
Expand All @@ -53,7 +53,7 @@ static int pi_try_set_control(struct pi_desc *pi_desc, u64 *pold, u64 new)
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vcpu_vt *vt = to_vt(vcpu);
struct pi_desc old, new;
unsigned long flags;
unsigned int dest;
Expand Down Expand Up @@ -90,7 +90,7 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
*/
if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR) {
raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
list_del(&vmx->pi_wakeup_list);
list_del(&vt->pi_wakeup_list);
raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
}

Expand Down Expand Up @@ -146,14 +146,14 @@ static bool vmx_can_use_vtd_pi(struct kvm *kvm)
static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vcpu_vt *vt = to_vt(vcpu);
struct pi_desc old, new;
unsigned long flags;

local_irq_save(flags);

raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
list_add_tail(&vmx->pi_wakeup_list,
list_add_tail(&vt->pi_wakeup_list,
&per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));

Expand Down Expand Up @@ -220,13 +220,13 @@ void pi_wakeup_handler(void)
int cpu = smp_processor_id();
struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu);
raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu);
struct vcpu_vmx *vmx;
struct vcpu_vt *vt;

raw_spin_lock(spinlock);
list_for_each_entry(vmx, wakeup_list, pi_wakeup_list) {
list_for_each_entry(vt, wakeup_list, pi_wakeup_list) {

if (pi_test_on(&vmx->pi_desc))
kvm_vcpu_wake_up(&vmx->vcpu);
if (pi_test_on(&vt->pi_desc))
kvm_vcpu_wake_up(vt_to_vcpu(vt));
}
raw_spin_unlock(spinlock);
}
Expand Down
16 changes: 3 additions & 13 deletions arch/x86/kvm/vmx/tdx.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@
#include "tdx_errno.h"

#ifdef CONFIG_KVM_INTEL_TDX
#include "common.h"

int tdx_bringup(void);
void tdx_cleanup(void);

Expand Down Expand Up @@ -45,6 +47,7 @@ enum vcpu_tdx_state {

struct vcpu_tdx {
struct kvm_vcpu vcpu;
struct vcpu_vt vt;

struct tdx_vp vp;

Expand All @@ -57,16 +60,6 @@ void tdh_vp_rd_failed(struct vcpu_tdx *tdx, char *uclass, u32 field, u64 err);
void tdh_vp_wr_failed(struct vcpu_tdx *tdx, char *uclass, char *op, u32 field,
u64 val, u64 err);

static inline bool is_td(struct kvm *kvm)
{
return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

static inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
{
return is_td(vcpu->kvm);
}

static __always_inline u64 td_tdcs_exec_read64(struct kvm_tdx *kvm_tdx, u32 field)
{
u64 err, data;
Expand Down Expand Up @@ -176,9 +169,6 @@ struct vcpu_tdx {
struct kvm_vcpu vcpu;
};

static inline bool is_td(struct kvm *kvm) { return false; }
static inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }

#endif

#endif
Loading

0 comments on commit 7172c75

Please sign in to comment.