KVM: x86: hyper-v: Move the remote TLB flush logic out of vmx
Currently the remote TLB flush logic is specific to VMX.
Move it to a common place so that SVM can use it as well.

Signed-off-by: Vineeth Pillai <viremana@linux.microsoft.com>
Message-Id: <4f4e4ca19778437dae502f44363a38e99e3ef5d1.1622730232.git.viremana@linux.microsoft.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Vineeth Pillai authored and Paolo Bonzini committed Jun 17, 2021
1 parent 32431fb commit 3c86c0d
Showing 7 changed files with 150 additions and 112 deletions.
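Before the diff: the commit's stated goal is to let SVM reuse this logic. As a hedged sketch (not part of this commit), the SVM side could install the now-common helpers roughly as follows; svm_x86_ops, npt_enabled and ms_hyperv come from arch/x86/kvm/svm/svm.c and asm/mshyperv.h, while the exact gating check (HV_X64_NESTED_ENLIGHTENED_TLB) is an assumption modeled on the VMX-side enablement:

#if IS_ENABLED(CONFIG_HYPERV)
static void svm_hv_hardware_setup(void)
{
	/* Assumed gating condition; mirrors the VMX-side check. */
	if (npt_enabled &&
	    ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
		/* Route remote TLB flushes through Hyper-V hypercalls. */
		svm_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
		svm_x86_ops.tlb_remote_flush_with_range =
				hv_remote_flush_tlb_with_range;
	}
}
#endif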
9 changes: 9 additions & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -852,6 +852,10 @@ struct kvm_vcpu_arch {

/* Protected Guests */
bool guest_state_protected;

#if IS_ENABLED(CONFIG_HYPERV)
hpa_t hv_root_tdp;
#endif
};

struct kvm_lpage_info {
@@ -1131,6 +1135,11 @@ struct kvm_arch {
* allocated for any newly created or modified memslots.
*/
bool memslots_have_rmaps;

#if IS_ENABLED(CONFIG_HYPERV)
hpa_t hv_root_tdp;
spinlock_t hv_root_tdp_lock;
#endif
};

struct kvm_vm_stat {
5 changes: 5 additions & 0 deletions arch/x86/kvm/Makefile
@@ -18,6 +18,11 @@ kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
mmu/spte.o

ifdef CONFIG_HYPERV
kvm-y += kvm_onhyperv.o
endif

kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
kvm-$(CONFIG_KVM_XEN) += xen.o

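Note: an ifdef block is used here instead of the more idiomatic kvm-$(CONFIG_HYPERV) += kvm_onhyperv.o, presumably because CONFIG_HYPERV is tristate: with CONFIG_HYPERV=m that form would expand to kvm-m and the object would never be linked into kvm.ko, even though IS_ENABLED(CONFIG_HYPERV) is true on the C side.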
93 changes: 93 additions & 0 deletions arch/x86/kvm/kvm_onhyperv.c
@@ -0,0 +1,93 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM L1 hypervisor optimizations on Hyper-V.
*/

#include <linux/kvm_host.h>
#include <asm/mshyperv.h>

#include "hyperv.h"
#include "kvm_onhyperv.h"

static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
void *data)
{
struct kvm_tlb_range *range = data;

return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
range->pages);
}

static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
struct kvm_tlb_range *range)
{
if (range)
return hyperv_flush_guest_mapping_range(root_tdp,
kvm_fill_hv_flush_list_func, (void *)range);
else
return hyperv_flush_guest_mapping(root_tdp);
}

int hv_remote_flush_tlb_with_range(struct kvm *kvm,
struct kvm_tlb_range *range)
{
struct kvm_arch *kvm_arch = &kvm->arch;
struct kvm_vcpu *vcpu;
int ret = 0, i, nr_unique_valid_roots;
hpa_t root;

spin_lock(&kvm_arch->hv_root_tdp_lock);

if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
nr_unique_valid_roots = 0;

/*
* Flush all valid roots, and see if all vCPUs have converged
* on a common root, in which case future flushes can skip the
* loop and flush the common root.
*/
kvm_for_each_vcpu(i, vcpu, kvm) {
root = vcpu->arch.hv_root_tdp;
if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
continue;

/*
* Set the tracked root to the first valid root. Keep
* this root for the entirety of the loop even if more
* roots are encountered as a low effort optimization
* to avoid flushing the same (first) root again.
*/
if (++nr_unique_valid_roots == 1)
kvm_arch->hv_root_tdp = root;

if (!ret)
ret = hv_remote_flush_root_tdp(root, range);

/*
* Stop processing roots if a failure occurred and
* multiple valid roots have already been detected.
*/
if (ret && nr_unique_valid_roots > 1)
break;
}

/*
* The optimized flush of a single root can't be used if there
* are multiple valid roots (obviously).
*/
if (nr_unique_valid_roots > 1)
kvm_arch->hv_root_tdp = INVALID_PAGE;
} else {
ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
}

spin_unlock(&kvm_arch->hv_root_tdp_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);

int hv_remote_flush_tlb(struct kvm *kvm)
{
return hv_remote_flush_tlb_with_range(kvm, NULL);
}
EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);
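These two functions are exported because the vendor modules install them as kvm_x86_ops callbacks. For reference, the pre-existing VMX wiring in hardware_setup() looks roughly like the sketch below (reproduced from the vmx.c of this era as best recalled; details may differ slightly):

#if IS_ENABLED(CONFIG_HYPERV)
	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
	    && enable_ept) {
		vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
		vmx_x86_ops.tlb_remote_flush_with_range =
				hv_remote_flush_tlb_with_range;
	}
#endif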
32 changes: 32 additions & 0 deletions arch/x86/kvm/kvm_onhyperv.h
@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* KVM L1 hypervisor optimizations on Hyper-V.
*/

#ifndef __ARCH_X86_KVM_KVM_ONHYPERV_H__
#define __ARCH_X86_KVM_KVM_ONHYPERV_H__

#if IS_ENABLED(CONFIG_HYPERV)
int hv_remote_flush_tlb_with_range(struct kvm *kvm,
struct kvm_tlb_range *range);
int hv_remote_flush_tlb(struct kvm *kvm);

static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
struct kvm_arch *kvm_arch = &vcpu->kvm->arch;

if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
spin_lock(&kvm_arch->hv_root_tdp_lock);
vcpu->arch.hv_root_tdp = root_tdp;
if (root_tdp != kvm_arch->hv_root_tdp)
kvm_arch->hv_root_tdp = INVALID_PAGE;
spin_unlock(&kvm_arch->hv_root_tdp_lock);
}
}
#else /* !CONFIG_HYPERV */
static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
}
#endif /* !CONFIG_HYPERV */

#endif
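Usage note: hv_track_root_tdp() is meant to be called from a vendor's .load_mmu_pgd hook whenever the TDP root changes, as vmx_load_mmu_pgd does later in this diff. A hedged sketch of an equivalent SVM call site (to_svm(), __sme_set(), vmcb_mark_dirty() and VMCB_NPT exist in the SVM code; the placement and the omitted non-NPT path are assumptions):

static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (npt_enabled) {
		/* Program the new NPT root and record it for Hyper-V. */
		svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
		hv_track_root_tdp(vcpu, root_hpa);
	}
}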
105 changes: 2 additions & 103 deletions arch/x86/kvm/vmx/vmx.c
@@ -52,6 +52,7 @@
#include "cpuid.h"
#include "evmcs.h"
#include "hyperv.h"
#include "kvm_onhyperv.h"
#include "irq.h"
#include "kvm_cache_regs.h"
#include "lapic.h"
@@ -458,86 +459,6 @@ static unsigned long host_idt_base;
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);

static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
void *data)
{
struct kvm_tlb_range *range = data;

return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
range->pages);
}

static inline int hv_remote_flush_root_ept(hpa_t root_ept,
struct kvm_tlb_range *range)
{
if (range)
return hyperv_flush_guest_mapping_range(root_ept,
kvm_fill_hv_flush_list_func, (void *)range);
else
return hyperv_flush_guest_mapping(root_ept);
}

static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
struct kvm_tlb_range *range)
{
struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
struct kvm_vcpu *vcpu;
int ret = 0, i, nr_unique_valid_roots;
hpa_t root;

spin_lock(&kvm_vmx->hv_root_ept_lock);

if (!VALID_PAGE(kvm_vmx->hv_root_ept)) {
nr_unique_valid_roots = 0;

/*
* Flush all valid roots, and see if all vCPUs have converged
* on a common root, in which case future flushes can skip the
* loop and flush the common root.
*/
kvm_for_each_vcpu(i, vcpu, kvm) {
root = to_vmx(vcpu)->hv_root_ept;
if (!VALID_PAGE(root) || root == kvm_vmx->hv_root_ept)
continue;

/*
* Set the tracked root to the first valid root. Keep
* this root for the entirety of the loop even if more
* roots are encountered as a low effort optimization
* to avoid flushing the same (first) root again.
*/
if (++nr_unique_valid_roots == 1)
kvm_vmx->hv_root_ept = root;

if (!ret)
ret = hv_remote_flush_root_ept(root, range);

/*
* Stop processing roots if a failure occurred and
* multiple valid roots have already been detected.
*/
if (ret && nr_unique_valid_roots > 1)
break;
}

/*
* The optimized flush of a single root can't be used if there
* are multiple valid roots (obviously).
*/
if (nr_unique_valid_roots > 1)
kvm_vmx->hv_root_ept = INVALID_PAGE;
} else {
ret = hv_remote_flush_root_ept(kvm_vmx->hv_root_ept, range);
}

spin_unlock(&kvm_vmx->hv_root_ept_lock);
return ret;
}
static int hv_remote_flush_tlb(struct kvm *kvm)
{
return hv_remote_flush_tlb_with_range(kvm, NULL);
}

static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
struct hv_enlightened_vmcs *evmcs;
@@ -565,21 +486,6 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)

#endif /* IS_ENABLED(CONFIG_HYPERV) */

static void hv_track_root_ept(struct kvm_vcpu *vcpu, hpa_t root_ept)
{
#if IS_ENABLED(CONFIG_HYPERV)
struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);

if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
spin_lock(&kvm_vmx->hv_root_ept_lock);
to_vmx(vcpu)->hv_root_ept = root_ept;
if (root_ept != kvm_vmx->hv_root_ept)
kvm_vmx->hv_root_ept = INVALID_PAGE;
spin_unlock(&kvm_vmx->hv_root_ept_lock);
}
#endif
}

/*
* Comment's format: document - errata name - stepping - processor name.
* Refer from
@@ -3184,7 +3090,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
eptp = construct_eptp(vcpu, root_hpa, root_level);
vmcs_write64(EPT_POINTER, eptp);

hv_track_root_ept(vcpu, root_hpa);
hv_track_root_tdp(vcpu, root_hpa);

if (!enable_unrestricted_guest && !is_paging(vcpu))
guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
@@ -6966,9 +6872,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
vmx->pi_desc.nv = POSTED_INTR_VECTOR;
vmx->pi_desc.sn = 1;

#if IS_ENABLED(CONFIG_HYPERV)
vmx->hv_root_ept = INVALID_PAGE;
#endif
return 0;

free_vmcs:
@@ -6985,10 +6888,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)

static int vmx_vm_init(struct kvm *kvm)
{
#if IS_ENABLED(CONFIG_HYPERV)
spin_lock_init(&to_kvm_vmx(kvm)->hv_root_ept_lock);
#endif

if (!ple_gap)
kvm->arch.pause_in_guest = true;

9 changes: 0 additions & 9 deletions arch/x86/kvm/vmx/vmx.h
@@ -334,10 +334,6 @@ struct vcpu_vmx {
/* SGX Launch Control public key hash */
u64 msr_ia32_sgxlepubkeyhash[4];

#if IS_ENABLED(CONFIG_HYPERV)
u64 hv_root_ept;
#endif

struct pt_desc pt_desc;
struct lbr_desc lbr_desc;

@@ -355,11 +351,6 @@ struct kvm_vmx {
unsigned int tss_addr;
bool ept_identity_pagetable_done;
gpa_t ept_identity_map_addr;

#if IS_ENABLED(CONFIG_HYPERV)
hpa_t hv_root_ept;
spinlock_t hv_root_ept_lock;
#endif
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
9 changes: 9 additions & 0 deletions arch/x86/kvm/x86.c
@@ -10494,6 +10494,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.pending_external_vector = -1;
vcpu->arch.preempted_in_kernel = false;

#if IS_ENABLED(CONFIG_HYPERV)
vcpu->arch.hv_root_tdp = INVALID_PAGE;
#endif

r = static_call(kvm_x86_vcpu_create)(vcpu);
if (r)
goto free_guest_fpu;
@@ -10878,6 +10882,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

kvm->arch.guest_can_read_msr_platform_info = true;

#if IS_ENABLED(CONFIG_HYPERV)
spin_lock_init(&kvm->arch.hv_root_tdp_lock);
kvm->arch.hv_root_tdp = INVALID_PAGE;
#endif

INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);

