Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
"ARM:

   - Fix EL2 Stage-1 MMIO mappings where a random address was used

   - Fix SMCCC function number comparison when the SVE hint is set

  RISC-V:

   - Fix KVM_GET_REG_LIST API for ISA_EXT registers

   - Fix reading ISA_EXT register of a missing extension

   - Fix ISA_EXT register handling in get-reg-list test

   - Fix filtering of AIA registers in get-reg-list test

  x86:

   - Fixes for TSC_AUX virtualization

   - Stop zapping page tables asynchronously, since we don't zap them as
     often as before"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SVM: Do not use user return MSR support for virtualized TSC_AUX
  KVM: SVM: Fix TSC_AUX virtualization setup
  KVM: SVM: INTERCEPT_RDTSCP is never intercepted anyway
  KVM: x86/mmu: Stop zapping invalidated TDP MMU roots asynchronously
  KVM: x86/mmu: Do not filter address spaces in for_each_tdp_mmu_root_yield_safe()
  KVM: x86/mmu: Open code leaf invalidation from mmu_notifier
  KVM: riscv: selftests: Selectively filter-out AIA registers
  KVM: riscv: selftests: Fix ISA_EXT register handling in get-reg-list
  RISC-V: KVM: Fix riscv_vcpu_get_isa_ext_single() for missing extensions
  RISC-V: KVM: Fix KVM_GET_REG_LIST API for ISA_EXT registers
  KVM: selftests: Assert that vasprintf() is successful
  KVM: arm64: nvhe: Ignore SVE hint in SMCCC function ID
  KVM: arm64: Properly return allocated EL2 VA from hyp_alloc_private_va_range()
Linus Torvalds committed Sep 24, 2023
2 parents 5edc6bb + 5804c19 · commit 8a511e7
Showing 20 changed files with 209 additions and 161 deletions.
arch/arm64/include/asm/kvm_hyp.h — 2 changes: 1 addition & 1 deletion

@@ -118,7 +118,7 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
 
 u64 __guest_enter(struct kvm_vcpu *vcpu);
 
-bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
 
 #ifdef __KVM_NVHE_HYPERVISOR__
 void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
arch/arm64/kvm/hyp/include/nvhe/ffa.h — 2 changes: 1 addition & 1 deletion

@@ -12,6 +12,6 @@
 #define FFA_MAX_FUNC_NUM 0x7F
 
 int hyp_ffa_init(void *pages);
-bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
 
 #endif /* __KVM_HYP_FFA_H */
arch/arm64/kvm/hyp/nvhe/ffa.c — 3 changes: 1 addition & 2 deletions

@@ -634,9 +634,8 @@ static bool do_ffa_features(struct arm_smccc_res *res,
        return true;
 }
 
-bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
 {
-       DECLARE_REG(u64, func_id, host_ctxt, 0);
        struct arm_smccc_res res;
 
        /*
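The DECLARE_REG() deletion above (and the matching one in psci-relay.c below) is half of the SVE-hint fix: the raw x0 register still carries the hint bits, so handle_host_smc() now masks the function ID once and passes it down rather than letting each handler re-read x0. A rough, self-contained C model of that calling convention — the context struct, register layout, and mask value here are stand-ins, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the saved host register file; not the kernel's layout. */
struct cpu_context {
        uint64_t regs[31];
};

/* Modeled on the nVHE DECLARE_REG() idiom: pull an SMCCC argument out of
 * the saved context by register index. */
#define DECLARE_REG(type, name, ctxt, reg) \
        type name = (type)(ctxt)->regs[(reg)]

static void handle_smc(struct cpu_context *host_ctxt)
{
        DECLARE_REG(uint64_t, func_id, host_ctxt, 0);   /* raw x0, hints included */

        /* Mask once at the dispatcher, as the fix does, so no handler
         * sees the hint bits again. */
        func_id &= ~(1ull << 16);
        printf("dispatching function %#llx\n", (unsigned long long)func_id);
}

int main(void)
{
        struct cpu_context ctxt = { .regs = { [0] = 0x84000000ull | (1ull << 16) } };

        handle_smc(&ctxt);      /* prints 0x84000000 */
        return 0;
}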
arch/arm64/kvm/hyp/nvhe/hyp-init.S — 1 change: 1 addition

@@ -57,6 +57,7 @@ __do_hyp_init:
        cmp     x0, #HVC_STUB_HCALL_NR
        b.lo    __kvm_handle_stub_hvc
 
+       bic     x0, x0, #ARM_SMCCC_CALL_HINTS
        mov     x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
        cmp     x0, x3
        b.eq    1f
arch/arm64/kvm/hyp/nvhe/hyp-main.c — 8 changes: 6 additions & 2 deletions

@@ -368,6 +368,7 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
        if (static_branch_unlikely(&kvm_protected_mode_initialized))
                hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
 
+       id &= ~ARM_SMCCC_CALL_HINTS;
        id -= KVM_HOST_SMCCC_ID(0);
 
        if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
@@ -392,11 +393,14 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
 
 static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 {
+       DECLARE_REG(u64, func_id, host_ctxt, 0);
        bool handled;
 
-       handled = kvm_host_psci_handler(host_ctxt);
+       func_id &= ~ARM_SMCCC_CALL_HINTS;
+
+       handled = kvm_host_psci_handler(host_ctxt, func_id);
        if (!handled)
-               handled = kvm_host_ffa_handler(host_ctxt);
+               handled = kvm_host_ffa_handler(host_ctxt, func_id);
        if (!handled)
                default_host_smc_handler(host_ctxt);
 
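Taken together, the bic in hyp-init.S and the two masks above implement the merge message's SMCCC-hint fix: SMCCC v1.3 lets callers set a hint bit (bit 16, the SVE live-state hint) in any function ID, so an unmasked ID no longer compares equal to the expected constant and the table-index subtraction in handle_host_hcall() lands far out of range. A minimal C sketch of that failure mode — the base constant and table size are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define SMCCC_SVE_HINT     (1u << 16)    /* hint bit per SMCCC v1.3 */
#define HOST_SMCCC_ID_BASE 0x86000000u   /* stand-in base, not the kernel's */
#define NR_HCALLS          32u

static int lookup_hcall(uint32_t func_id)
{
        func_id &= ~SMCCC_SVE_HINT;      /* the fix: strip hints before indexing */
        func_id -= HOST_SMCCC_ID_BASE;
        if (func_id >= NR_HCALLS)
                return -1;               /* hinted IDs were rejected here before */
        return (int)func_id;
}

int main(void)
{
        uint32_t id = HOST_SMCCC_ID_BASE + 3;

        printf("plain:  %d\n", lookup_hcall(id));                  /* 3 */
        printf("hinted: %d\n", lookup_hcall(id | SMCCC_SVE_HINT)); /* 3, not -1 */
        return 0;
}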
arch/arm64/kvm/hyp/nvhe/psci-relay.c — 3 changes: 1 addition & 2 deletions

@@ -273,9 +273,8 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
        }
 }
 
-bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
 {
-       DECLARE_REG(u64, func_id, host_ctxt, 0);
        unsigned long ret;
 
        switch (kvm_host_psci_config.version) {
arch/arm64/kvm/mmu.c — 3 changes: 3 additions

@@ -652,6 +652,9 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
 
        mutex_unlock(&kvm_hyp_pgd_mutex);
 
+       if (!ret)
+               *haddr = base;
+
        return ret;
 }
 
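These three lines are the whole of the "random address" fix from the merge message: hyp_alloc_private_va_range() computed a fresh EL2 VA in base but returned without ever storing it to *haddr, so callers mapped MMIO at whatever garbage their stack variable happened to hold. A self-contained C model of the bug's shape and the fix — the allocation policy and names are invented for illustration:

#include <errno.h>
#include <stdio.h>

static unsigned long next_va = 0x1000;  /* toy allocator state */

static int alloc_va_range(size_t size, unsigned long *haddr)
{
        unsigned long base = next_va;
        int ret = 0;

        if (base + size < base)         /* stands in for the real range checks */
                ret = -ENOMEM;
        else
                next_va = base + size;

        if (!ret)                       /* the fix: publish the result on success */
                *haddr = base;

        return ret;
}

int main(void)
{
        unsigned long va;               /* uninitialized, like the callers' variable */

        if (!alloc_va_range(0x2000, &va))
                printf("allocated VA range at %#lx\n", va);
        return 0;
}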
arch/riscv/kvm/vcpu_onereg.c — 7 changes: 5 additions & 2 deletions

@@ -460,8 +460,11 @@ static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
            reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
                return -ENOENT;
 
-       *reg_val = 0;
        host_isa_ext = kvm_isa_ext_arr[reg_num];
+       if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+               return -ENOENT;
+
+       *reg_val = 0;
        if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
                *reg_val = 1;   /* Mark the given extension as available */
 
@@ -842,7 +845,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
 
                isa_ext = kvm_isa_ext_arr[i];
-               if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
+               if (!__riscv_isa_extension_available(NULL, isa_ext))
                        continue;
 
                if (uindices) {
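The first hunk changes user-visible semantics: reading the ISA_EXT register of an extension the host doesn't implement now fails with -ENOENT instead of silently returning 0, and the second hunk makes KVM_GET_REG_LIST enumerate by host support (the NULL ISA pointer) rather than per-vCPU state, so the listed set matches what is actually readable. A toy C model of the fixed semantics, with plain bitmaps standing in for the real ISA arrays:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t host_isa = 0x5;  /* host implements extensions 0 and 2 */
static uint64_t vcpu_isa = 0x1;  /* the vCPU has only extension 0 enabled */

static int get_isa_ext_single(unsigned int reg_num, uint64_t *reg_val)
{
        if (reg_num >= 64)
                return -ENOENT;
        if (!(host_isa & (1ull << reg_num)))
                return -ENOENT;  /* the fix: a missing extension is an error */

        *reg_val = !!(vcpu_isa & (1ull << reg_num));
        return 0;
}

int main(void)
{
        uint64_t val;

        printf("ext0: %d\n", get_isa_ext_single(0, &val) ? -1 : (int)val); /* 1 */
        printf("ext2: %d\n", get_isa_ext_single(2, &val) ? -1 : (int)val); /* 0 */
        printf("ext1: %s\n", get_isa_ext_single(1, &val) ? "-ENOENT" : "0");
        return 0;
}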
arch/x86/include/asm/kvm_host.h — 3 changes: 1 addition & 2 deletions

@@ -1419,7 +1419,6 @@ struct kvm_arch {
         * the thread holds the MMU lock in write mode.
         */
        spinlock_t tdp_mmu_pages_lock;
-       struct workqueue_struct *tdp_mmu_zap_wq;
 #endif /* CONFIG_X86_64 */
 
        /*
@@ -1835,7 +1834,7 @@ void kvm_mmu_vendor_module_exit(void);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-int kvm_mmu_init_vm(struct kvm *kvm);
+void kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 
 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
arch/x86/kvm/mmu/mmu.c — 21 changes: 5 additions & 16 deletions

@@ -6167,20 +6167,15 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
        return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
 }
 
-int kvm_mmu_init_vm(struct kvm *kvm)
+void kvm_mmu_init_vm(struct kvm *kvm)
 {
-       int r;
-
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
        INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
        spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-       if (tdp_mmu_enabled) {
-               r = kvm_mmu_init_tdp_mmu(kvm);
-               if (r < 0)
-                       return r;
-       }
+       if (tdp_mmu_enabled)
+               kvm_mmu_init_tdp_mmu(kvm);
 
        kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
        kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
@@ -6189,8 +6184,6 @@ int kvm_mmu_init_vm(struct kvm *kvm)
 
        kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
        kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
-
-       return 0;
 }
 
 static void mmu_free_vm_memory_caches(struct kvm *kvm)
@@ -6246,7 +6239,6 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
        bool flush;
-       int i;
 
        if (WARN_ON_ONCE(gfn_end <= gfn_start))
                return;
@@ -6257,11 +6249,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
        flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
 
-       if (tdp_mmu_enabled) {
-               for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-                       flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
-                                                     gfn_end, true, flush);
-       }
+       if (tdp_mmu_enabled)
+               flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
 
        if (flush)
                kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
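The last hunk shows kvm_tdp_mmu_zap_leafs() losing its address-space parameter: the TDP MMU root walker now visits every valid root itself, so callers stop looping over KVM_ADDRESS_SPACE_NUM and filtering by index. A toy C model of the new calling shape — the root list and types are stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct root {
        int as_id;      /* 0 = normal, 1 = SMM on x86 */
        bool valid;
};

static struct root roots[] = {
        { .as_id = 0, .valid = true },
        { .as_id = 1, .valid = true },
        { .as_id = 0, .valid = false },  /* already torn down; skipped */
};

/* After the change: one call visits every valid root in every address space. */
static bool zap_leafs(unsigned long start, unsigned long end, bool flush)
{
        for (unsigned int i = 0; i < sizeof(roots) / sizeof(roots[0]); i++) {
                if (!roots[i].valid)
                        continue;
                printf("zap [%#lx, %#lx) under root %u (as %d)\n",
                       start, end, i, roots[i].as_id);
                flush = true;
        }
        return flush;
}

int main(void)
{
        printf("flush needed: %d\n", zap_leafs(0x0, 0x10000, false));
        return 0;
}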
arch/x86/kvm/mmu/mmu_internal.h — 15 changes: 7 additions & 8 deletions

@@ -58,7 +58,12 @@ struct kvm_mmu_page {
 
        bool tdp_mmu_page;
        bool unsync;
-       u8 mmu_valid_gen;
+       union {
+               u8 mmu_valid_gen;
+
+               /* Only accessed under slots_lock. */
+               bool tdp_mmu_scheduled_root_to_zap;
+       };
 
        /*
         * The shadow page can't be replaced by an equivalent huge page
@@ -100,13 +105,7 @@ struct kvm_mmu_page {
                struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
                tdp_ptep_t ptep;
        };
-       union {
-               DECLARE_BITMAP(unsync_child_bitmap, 512);
-               struct {
-                       struct work_struct tdp_mmu_async_work;
-                       void *tdp_mmu_async_data;
-               };
-       };
+       DECLARE_BITMAP(unsync_child_bitmap, 512);
 
        /*
         * Tracks shadow pages that, if zapped, would allow KVM to create an NX
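With the async zap machinery gone, the new tdp_mmu_scheduled_root_to_zap flag costs no space: a TDP MMU root never uses the shadow MMU's generation number, so the two fields can share storage. A stripped-down C sketch of the pattern — the struct is reduced to just the fields relevant here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mmu_page {
        bool tdp_mmu_page;
        union {
                uint8_t mmu_valid_gen;                 /* shadow-MMU pages */
                bool tdp_mmu_scheduled_root_to_zap;    /* TDP MMU roots; only
                                                        * accessed under slots_lock */
        };
};

int main(void)
{
        /* The union adds nothing over the single u8 it replaces. */
        printf("sizeof(struct mmu_page) = %zu\n", sizeof(struct mmu_page));
        return 0;
}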
(Diffs for the remaining 9 of the 20 changed files are truncated.)
