Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "ARM fixes:

   - Another fix for state updates on exit to userspace

   - Prevent the creation of mixed 32/64 VMs

   - Fix regression with irqbypass not restarting the guest on failed
     connect

   - Fix regression with debug register decoding resulting in
     overlapping access

   - Commit exception state on exit to userspace

   - Fix the MMU notifier return values

   - Add missing 'static' qualifiers in the new host stage-2 code

  x86 fixes:

   - fix missed guest wakeup with assigned devices

   - fix WARN reported by syzkaller

   - do not use BIT() in UAPI headers

   - make the kvm_amd.avic parameter bool

  PPC fixes:

   - make halt polling heuristics consistent with other architectures

  selftests:

   - various fixes

   - new performance selftest memslot_perf_test

   - test UFFD minor faults in demand_paging_test"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (44 commits)
  selftests: kvm: fix overlapping addresses in memslot_perf_test
  KVM: X86: Kill off ctxt->ud
  KVM: X86: Fix warning caused by stale emulation context
  KVM: X86: Use kvm_get_linear_rip() in single-step and #DB/#BP interception
  KVM: x86/mmu: Fix comment mentioning skip_4k
  KVM: VMX: update vcpu posted-interrupt descriptor when assigning device
  KVM: rename KVM_REQ_PENDING_TIMER to KVM_REQ_UNBLOCK
  KVM: x86: add start_assignment hook to kvm_x86_ops
  KVM: LAPIC: Narrow the timer latency between wait_lapic_expire and world switch
  selftests: kvm: do only 1 memslot_perf_test run by default
  KVM: X86: Use _BITUL() macro in UAPI headers
  KVM: selftests: add shared hugetlbfs backing source type
  KVM: selftests: allow using UFFD minor faults for demand paging
  KVM: selftests: create alias mappings when using shared memory
  KVM: selftests: add shmem backing source type
  KVM: selftests: refactor vm_mem_backing_src_type flags
  KVM: selftests: allow different backing source types
  KVM: selftests: compute correct demand paging size
  KVM: selftests: simplify setup_demand_paging error handling
  KVM: selftests: Print a message if /dev/kvm is missing
  ...
Linus Torvalds committed May 29, 2021
2 parents 866c4b8 + 000ac42 commit 2244782
Showing 52 changed files with 1,694 additions and 287 deletions.
8 changes: 5 additions & 3 deletions Documentation/virt/kvm/vcpu-requests.rst
@@ -118,10 +118,12 @@ KVM_REQ_MMU_RELOAD
necessary to inform each VCPU to completely refresh the tables. This
request is used for that.

KVM_REQ_PENDING_TIMER
KVM_REQ_UNBLOCK

This request may be made from a timer handler run on the host on behalf
of a VCPU. It informs the VCPU thread to inject a timer interrupt.
This request informs the vCPU to exit kvm_vcpu_block. It is used for
example from timer handlers that run on the host on behalf of a vCPU,
or in order to update the interrupt routing and ensure that assigned
devices will wake up the vCPU.

KVM_REQ_UNHALT

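The reworded entry above describes KVM_REQ_UNBLOCK as a generic request to leave kvm_vcpu_block(), no longer tied to timer injection. As a rough sketch of how a host-side path would typically use it (not taken from this commit; example_unblock_vcpu is a made-up name, the two helpers are the existing KVM ones):

#include <linux/kvm_host.h>

/* Illustrative sketch only, not part of this commit: a host-side helper
 * that needs a blocked vCPU to leave kvm_vcpu_block() latches the request
 * and then kicks the vCPU thread so it notices it promptly. */
static void example_unblock_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_UNBLOCK, vcpu);        /* record the request */
        kvm_vcpu_kick(vcpu);                            /* wake the vCPU if it is blocked */
}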
3 changes: 3 additions & 0 deletions arch/arm64/include/asm/kvm_asm.h
@@ -63,6 +63,7 @@
#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21

#ifndef __ASSEMBLY__

@@ -201,6 +202,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
5 changes: 5 additions & 0 deletions arch/arm64/include/asm/kvm_emulate.h
@@ -463,4 +463,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */
20 changes: 17 additions & 3 deletions arch/arm64/kvm/arm.c
@@ -720,11 +720,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
return ret;
}

if (run->immediate_exit)
return -EINTR;

vcpu_load(vcpu);

if (run->immediate_exit) {
ret = -EINTR;
goto out;
}

kvm_sigset_activate(vcpu);

ret = 1;
@@ -897,6 +899,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)

kvm_sigset_deactivate(vcpu);

out:
/*
* In the unlikely event that we are returning to userspace
* with pending exceptions or PC adjustment, commit these
* adjustments in order to give userspace a consistent view of
* the vcpu state. Note that this relies on __kvm_adjust_pc()
* being preempt-safe on VHE.
*/
if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
KVM_ARM64_INCREMENT_PC)))
kvm_call_hyp(__kvm_adjust_pc, vcpu);

vcpu_put(vcpu);
return ret;
}
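One user-visible consequence of the out: path above: even when KVM_RUN returns early (immediate_exit or a pending signal), the guest PC and exception state that userspace reads back are already committed. A hedged userspace-side sketch, assuming vcpu_fd is an open vCPU file descriptor and omitting error handling (none of this code is part of the commit):

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: after KVM_RUN has returned to userspace, the PC read via
 * KVM_GET_ONE_REG already reflects any deferred increment or exception
 * entry, because the out: path above commits it before vcpu_put(). */
static __u64 example_read_guest_pc(int vcpu_fd)
{
        __u64 pc = 0;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
                        KVM_REG_ARM_CORE_REG(regs.pc),
                .addr = (__u64)&pc,
        };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);  /* error handling omitted */
        return pc;
}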
18 changes: 17 additions & 1 deletion arch/arm64/kvm/hyp/exception.c
@@ -296,7 +296,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
*vcpu_pc(vcpu) = vect_offset;
}

void kvm_inject_exception(struct kvm_vcpu *vcpu)
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
if (vcpu_el1_is_32bit(vcpu)) {
switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
@@ -329,3 +329,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
}
}
}

/*
* Adjust the guest PC (and potentially exception state) depending on
* flags provided by the emulation code.
*/
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
kvm_inject_exception(vcpu);
vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
KVM_ARM64_EXCEPT_MASK);
} else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
kvm_skip_instr(vcpu);
vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
}
}
18 changes: 0 additions & 18 deletions arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -13,8 +13,6 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

void kvm_inject_exception(struct kvm_vcpu *vcpu);

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
@@ -43,22 +41,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

/*
* Adjust the guest PC on entry, depending on flags provided by EL1
* for the purpose of emulation (MMIO, sysreg) or exception injection.
*/
static inline void __adjust_pc(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
kvm_inject_exception(vcpu);
vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
KVM_ARM64_EXCEPT_MASK);
} else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
kvm_skip_instr(vcpu);
vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
}
}

/*
* Skip an instruction while host sysregs are live.
* Assumes host is always 64-bit.
8 changes: 8 additions & 0 deletions arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -28,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}

static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

__kvm_adjust_pc(kern_hyp_va(vcpu));
}

static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
__kvm_flush_vm_context();
@@ -170,6 +177,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);

static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
4 changes: 2 additions & 2 deletions arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -23,8 +23,8 @@
extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

struct hyp_pool host_s2_mem;
struct hyp_pool host_s2_dev;
static struct hyp_pool host_s2_mem;
static struct hyp_pool host_s2_dev;

/*
* Copies of the host's CPU features registers holding sanitized values.
2 changes: 1 addition & 1 deletion arch/arm64/kvm/hyp/nvhe/setup.c
@@ -17,7 +17,6 @@
#include <nvhe/trap_handler.h>

struct hyp_pool hpool;
struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@@ -27,6 +26,7 @@ static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_mem_pgt_base;
static void *host_s2_dev_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;

static int divide_memory_pool(void *virt, unsigned long size)
{
3 changes: 1 addition & 2 deletions arch/arm64/kvm/hyp/nvhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/

#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

@@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
*/
__debug_save_host_buffers_nvhe(vcpu);

__adjust_pc(vcpu);
__kvm_adjust_pc(vcpu);

/*
* We must restore the 32-bit state before the sysregs, thanks
3 changes: 1 addition & 2 deletions arch/arm64/kvm/hyp/vhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/

#include <hyp/adjust_pc.h>
#include <hyp/switch.h>

#include <linux/arm-smccc.h>
@@ -132,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__load_guest_stage2(vcpu->arch.hw_mmu);
__activate_traps(vcpu);

__adjust_pc(vcpu);
__kvm_adjust_pc(vcpu);

sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
12 changes: 6 additions & 6 deletions arch/arm64/kvm/mmu.c
@@ -1156,21 +1156,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
return 0;
return false;

__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
(range->end - range->start) << PAGE_SHIFT,
range->may_block);

return 0;
return false;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_pfn_t pfn = pte_pfn(range->pte);

if (!kvm->arch.mmu.pgt)
return 0;
return false;

WARN_ON(range->end - range->start != 1);

@@ -1190,7 +1190,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
PAGE_SIZE, __pfn_to_phys(pfn),
KVM_PGTABLE_PROT_R, NULL);

return 0;
return false;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
Expand All @@ -1200,7 +1200,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
pte_t pte;

if (!kvm->arch.mmu.pgt)
return 0;
return false;

WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

@@ -1213,7 +1213,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
return 0;
return false;

return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
range->start << PAGE_SHIFT);
28 changes: 24 additions & 4 deletions arch/arm64/kvm/reset.c
@@ -166,6 +166,25 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
return 0;
}

static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *tmp;
bool is32bit;
int i;

is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
return false;

/* Check that the vcpus are either all 32bit or all 64bit */
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
return false;
}

return true;
}

/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
@@ -217,13 +236,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}

if (!vcpu_allowed_register_width(vcpu)) {
ret = -EINVAL;
goto out;
}

switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
ret = -EINVAL;
goto out;
}
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
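The vcpu_allowed_register_width() check above implements the "prevent the creation of mixed 32/64 VMs" item from the merge message. A hedged userspace-side sketch of the resulting behaviour, assuming vcpu_fd is an open vCPU file descriptor and using a generic target for brevity (real code should query KVM_ARM_PREFERRED_TARGET; nothing below comes from the commit itself):

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: request a 32-bit EL1 vCPU. If another vCPU in the same VM
 * was already initialised as 64-bit (or the host has no 32-bit EL1
 * support), KVM_ARM_VCPU_INIT now fails with EINVAL. */
static int example_init_32bit_vcpu(int vcpu_fd)
{
        struct kvm_vcpu_init init = {
                .target = KVM_ARM_TARGET_GENERIC_V8,    /* assumption; prefer KVM_ARM_PREFERRED_TARGET */
        };

        init.features[0] |= 1u << KVM_ARM_VCPU_EL1_32BIT;

        if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
                return -errno;          /* -EINVAL for a mixed 32/64-bit VM */

        return 0;
}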