KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection
Replace the kvm_x86_ops pointer in common x86 with an instance of the
struct to save one pointer dereference when invoking functions.  Copy the
struct by value to set the ops during kvm_init().
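
A toy illustration of the pointer-versus-instance difference (a sketch only;
the struct, function, and variable names below are invented for this example
and are not the kernel's):

#include <stdio.h>

/* Miniature stand-in for kvm_x86_ops: a table of vendor callbacks. */
struct demo_ops {
	void (*hardware_enable)(void);
	void (*run)(int vcpu_id);
};

static void vendor_hardware_enable(void) { puts("hardware_enable"); }
static void vendor_run(int vcpu_id)      { printf("run vcpu %d\n", vcpu_id); }

/* The vendor module's table (think vmx_x86_ops or svm_x86_ops). */
static const struct demo_ops vendor_ops = {
	.hardware_enable = vendor_hardware_enable,
	.run             = vendor_run,
};

/*
 * Common code owns an instance rather than a pointer: a call is then
 * demo_x86_ops.run(...), one load of the function pointer, instead of
 * ops_ptr->run(...), which loads the pointer and then the function pointer.
 */
static struct demo_ops demo_x86_ops;

static void demo_set_ops(const struct demo_ops *ops)
{
	demo_x86_ops = *ops;	/* one struct copy at "module load" time */
}

int main(void)
{
	demo_set_ops(&vendor_ops);
	demo_x86_ops.run(0);	/* direct member access, no extra dereference */
	return 0;
}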

Arbitrarily use kvm_x86_ops.hardware_enable to track whether or not the
ops have been initialized, i.e. whether a vendor KVM module has been loaded.
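
Continuing the toy sketch above (again illustrative, not the kernel's code),
"have the ops been set?" then reduces to checking a single member of the
instance:

#include <stdbool.h>
#include <stddef.h>

/* Relies on demo_x86_ops from the previous sketch.  Any callback that every
 * vendor must provide would do as the flag; the commit arbitrarily picks
 * hardware_enable. */
static bool demo_ops_initialized(void)
{
	return demo_x86_ops.hardware_enable != NULL;
}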

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200321202603.19355-7-sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson authored and Paolo Bonzini committed Mar 31, 2020
1 parent 69c6f69 commit afaf0b2
Showing 15 changed files with 257 additions and 257 deletions.
18 changes: 9 additions & 9 deletions arch/x86/include/asm/kvm_host.h
@@ -1274,22 +1274,22 @@ struct kvm_arch_async_pf {

extern u64 __read_mostly host_efer;

-extern struct kvm_x86_ops *kvm_x86_ops;
+extern struct kvm_x86_ops kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
-return __vmalloc(kvm_x86_ops->vm_size,
+return __vmalloc(kvm_x86_ops.vm_size,
GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
}
void kvm_arch_free_vm(struct kvm *kvm);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
-if (kvm_x86_ops->tlb_remote_flush &&
-!kvm_x86_ops->tlb_remote_flush(kvm))
+if (kvm_x86_ops.tlb_remote_flush &&
+!kvm_x86_ops.tlb_remote_flush(kvm))
return 0;
else
return -ENOTSUPP;
@@ -1375,7 +1375,7 @@ extern u64 kvm_mce_cap_supported;
*
* EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
* decode the instruction length. For use *only* by
-* kvm_x86_ops->skip_emulated_instruction() implementations.
+* kvm_x86_ops.skip_emulated_instruction() implementations.
*
* EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
* retry native execution under certain conditions,
@@ -1669,14 +1669,14 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
-if (kvm_x86_ops->vcpu_blocking)
-kvm_x86_ops->vcpu_blocking(vcpu);
+if (kvm_x86_ops.vcpu_blocking)
+kvm_x86_ops.vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
-if (kvm_x86_ops->vcpu_unblocking)
-kvm_x86_ops->vcpu_unblocking(vcpu);
+if (kvm_x86_ops.vcpu_unblocking)
+kvm_x86_ops.vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
4 changes: 2 additions & 2 deletions arch/x86/kvm/cpuid.c
@@ -209,7 +209,7 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
vcpu->arch.cpuid_nent = cpuid->nent;
cpuid_fix_nx_cap(vcpu);
kvm_apic_set_version(vcpu);
-kvm_x86_ops->cpuid_update(vcpu);
+kvm_x86_ops.cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);

out:
@@ -232,7 +232,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
goto out;
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
-kvm_x86_ops->cpuid_update(vcpu);
+kvm_x86_ops.cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);
out:
return r;
8 changes: 4 additions & 4 deletions arch/x86/kvm/hyperv.c
@@ -1022,7 +1022,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return 1;
-kvm_x86_ops->patch_hypercall(vcpu, instructions);
+kvm_x86_ops.patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
@@ -1607,7 +1607,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
* hypercall generates UD from non zero cpl and real mode
* per HYPER-V spec
*/
-if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
@@ -1800,8 +1800,8 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
};
int i, nent = ARRAY_SIZE(cpuid_entries);

-if (kvm_x86_ops->nested_get_evmcs_version)
-evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+if (kvm_x86_ops.nested_get_evmcs_version)
+evmcs_ver = kvm_x86_ops.nested_get_evmcs_version(vcpu);

/* Skip NESTED_FEATURES if eVMCS is not supported */
if (!evmcs_ver)
10 changes: 5 additions & 5 deletions arch/x86/kvm/kvm_cache_regs.h
@@ -68,7 +68,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
return 0;

if (!kvm_register_is_available(vcpu, reg))
-kvm_x86_ops->cache_reg(vcpu, reg);
+kvm_x86_ops.cache_reg(vcpu, reg);

return vcpu->arch.regs[reg];
}
@@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
might_sleep(); /* on svm */

if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
-kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
+kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);

return vcpu->arch.walk_mmu->pdptrs[index];
}
@@ -117,7 +117,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
if (tmask & vcpu->arch.cr0_guest_owned_bits)
-kvm_x86_ops->decache_cr0_guest_bits(vcpu);
+kvm_x86_ops.decache_cr0_guest_bits(vcpu);
return vcpu->arch.cr0 & mask;
}

@@ -130,14 +130,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
if (tmask & vcpu->arch.cr4_guest_owned_bits)
-kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+kvm_x86_ops.decache_cr4_guest_bits(vcpu);
return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_CR3);
+kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
return vcpu->arch.cr3;
}

30 changes: 15 additions & 15 deletions arch/x86/kvm/lapic.c
@@ -463,7 +463,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
if (unlikely(vcpu->arch.apicv_active)) {
/* need to update RVI */
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-kvm_x86_ops->hwapic_irr_update(vcpu,
+kvm_x86_ops.hwapic_irr_update(vcpu,
apic_find_highest_irr(apic));
} else {
apic->irr_pending = false;
@@ -488,7 +488,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
* just set SVI.
*/
if (unlikely(vcpu->arch.apicv_active))
-kvm_x86_ops->hwapic_isr_update(vcpu, vec);
+kvm_x86_ops.hwapic_isr_update(vcpu, vec);
else {
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -536,7 +536,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
* and must be left alone.
*/
if (unlikely(vcpu->arch.apicv_active))
-kvm_x86_ops->hwapic_isr_update(vcpu,
+kvm_x86_ops.hwapic_isr_update(vcpu,
apic_find_highest_isr(apic));
else {
--apic->isr_count;
@@ -674,7 +674,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
int highest_irr;
if (apic->vcpu->arch.apicv_active)
-highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
+highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu);
else
highest_irr = apic_find_highest_irr(apic);
if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1063,7 +1063,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic->regs + APIC_TMR);
}

-if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) {
kvm_lapic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
@@ -1746,7 +1746,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
{
WARN_ON(preemptible());
WARN_ON(!apic->lapic_timer.hv_timer_in_use);
-kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+kvm_x86_ops.cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
}

@@ -1757,13 +1757,13 @@ static bool start_hv_timer(struct kvm_lapic *apic)
bool expired;

WARN_ON(preemptible());
-if (!kvm_x86_ops->set_hv_timer)
+if (!kvm_x86_ops.set_hv_timer)
return false;

if (!ktimer->tscdeadline)
return false;

-if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
+if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
return false;

ktimer->hv_timer_in_use = true;
@@ -2190,7 +2190,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);

if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
-kvm_x86_ops->set_virtual_apic_mode(vcpu);
+kvm_x86_ops.set_virtual_apic_mode(vcpu);

apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
@@ -2268,9 +2268,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
if (vcpu->arch.apicv_active) {
-kvm_x86_ops->apicv_post_state_restore(vcpu);
-kvm_x86_ops->hwapic_irr_update(vcpu, -1);
-kvm_x86_ops->hwapic_isr_update(vcpu, -1);
+kvm_x86_ops.apicv_post_state_restore(vcpu);
+kvm_x86_ops.hwapic_irr_update(vcpu, -1);
+kvm_x86_ops.hwapic_isr_update(vcpu, -1);
}

vcpu->arch.apic_arb_prio = 0;
@@ -2521,10 +2521,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
kvm_apic_update_apicv(vcpu);
apic->highest_isr_cache = -1;
if (vcpu->arch.apicv_active) {
-kvm_x86_ops->apicv_post_state_restore(vcpu);
-kvm_x86_ops->hwapic_irr_update(vcpu,
+kvm_x86_ops.apicv_post_state_restore(vcpu);
+kvm_x86_ops.hwapic_irr_update(vcpu,
apic_find_highest_irr(apic));
-kvm_x86_ops->hwapic_isr_update(vcpu,
+kvm_x86_ops.hwapic_isr_update(vcpu,
apic_find_highest_isr(apic));
}
kvm_make_request(KVM_REQ_EVENT, vcpu);
8 changes: 4 additions & 4 deletions arch/x86/kvm/mmu.h
@@ -98,8 +98,8 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
-kvm_x86_ops->load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
-kvm_get_active_pcid(vcpu));
+kvm_x86_ops.load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
+kvm_get_active_pcid(vcpu));
}

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
@@ -170,8 +170,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
unsigned pte_access, unsigned pte_pkey,
unsigned pfec)
{
-int cpl = kvm_x86_ops->get_cpl(vcpu);
-unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+int cpl = kvm_x86_ops.get_cpl(vcpu);
+unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);

/*
* If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
32 changes: 16 additions & 16 deletions arch/x86/kvm/mmu/mmu.c
@@ -305,16 +305,16 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

static inline bool kvm_available_flush_tlb_with_range(void)
{
-return kvm_x86_ops->tlb_remote_flush_with_range;
+return kvm_x86_ops.tlb_remote_flush_with_range;
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
struct kvm_tlb_range *range)
{
int ret = -ENOTSUPP;

-if (range && kvm_x86_ops->tlb_remote_flush_with_range)
-ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
+if (range && kvm_x86_ops.tlb_remote_flush_with_range)
+ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);

if (ret)
kvm_flush_remote_tlbs(kvm);
@@ -1642,7 +1642,7 @@ static bool spte_set_dirty(u64 *sptep)
rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);

/*
-* Similar to the !kvm_x86_ops->slot_disable_log_dirty case,
+* Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
* do not bother adding back write access to pages marked
* SPTE_AD_WRPROT_ONLY_MASK.
*/
@@ -1731,8 +1731,8 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
-if (kvm_x86_ops->enable_log_dirty_pt_masked)
-kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
+if (kvm_x86_ops.enable_log_dirty_pt_masked)
+kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
mask);
else
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
@@ -1747,8 +1747,8 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
*/
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
{
-if (kvm_x86_ops->write_log_dirty)
-return kvm_x86_ops->write_log_dirty(vcpu);
+if (kvm_x86_ops.write_log_dirty)
+return kvm_x86_ops.write_log_dirty(vcpu);

return 0;
}
@@ -3036,7 +3036,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (level > PT_PAGE_TABLE_LEVEL)
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
-spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
+spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,
kvm_is_mmio_pfn(pfn));

if (host_writable)
@@ -4909,7 +4909,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);

role.base.ad_disabled = (shadow_accessed_mask == 0);
-role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
+role.base.level = kvm_x86_ops.get_tdp_level(vcpu);
role.base.direct = true;
role.base.gpte_is_8_bytes = true;

@@ -4930,7 +4930,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
-context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
+context->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
context->direct_map = true;
context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
@@ -5183,7 +5183,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
if (r)
goto out;
kvm_mmu_load_pgd(vcpu);
-kvm_x86_ops->tlb_flush(vcpu, true);
+kvm_x86_ops.tlb_flush(vcpu, true);
out:
return r;
}
@@ -5488,7 +5488,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
* guest, with the exception of AMD Erratum 1096 which is unrecoverable.
*/
if (unlikely(insn && !insn_len)) {
-if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
+if (!kvm_x86_ops.need_emulation_on_page_fault(vcpu))
return 1;
}

@@ -5523,7 +5523,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
if (VALID_PAGE(mmu->prev_roots[i].hpa))
mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);

-kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+kvm_x86_ops.tlb_flush_gva(vcpu, gva);
++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5548,7 +5548,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
}

if (tlb_flush)
-kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+kvm_x86_ops.tlb_flush_gva(vcpu, gva);

++vcpu->stat.invlpg;

@@ -5672,7 +5672,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
* SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
* skip allocating the PDP table.
*/
-if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+if (tdp_enabled && kvm_x86_ops.get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
return 0;

page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);