Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "ARM:

   - Fix unexpected sign extension of KVM_ARM_DEVICE_ID_MASK

   - Tidy-up handling of AArch32 on asymmetric systems

  x86:

   - Fix 'missing ENDBR' BUG for fastop functions

  Generic:

   - Some cleanup and static analyzer patches

   - More fixes to KVM_CREATE_VM unwind paths"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: Drop unnecessary initialization of "ops" in kvm_ioctl_create_device()
  KVM: Drop unnecessary initialization of "npages" in hva_to_pfn_slow()
  x86/kvm: Fix "missing ENDBR" BUG for fastop functions
  x86/kvm: Simplify FOP_SETCC()
  x86/ibt, objtool: Add IBT_NOSEAL()
  KVM: Rename mmu_notifier_* to mmu_invalidate_*
  KVM: Rename KVM_PRIVATE_MEM_SLOTS to KVM_INTERNAL_MEM_SLOTS
  KVM: MIPS: remove unnecessary definition of KVM_PRIVATE_MEM_SLOTS
  KVM: Move coalesced MMIO initialization (back) into kvm_create_vm()
  KVM: Unconditionally get a ref to /dev/kvm module when creating a VM
  KVM: Properly unwind VM creation if creating debugfs fails
  KVM: arm64: Reject 32bit user PSTATE on asymmetric systems
  KVM: arm64: Treat PMCR_EL1.LC as RES1 on asymmetric systems
  KVM: arm64: Fix compile error due to sign extension
Linus Torvalds committed Aug 19, 2022
2 parents 42c54d5 + 959d6c4 commit ca052cf
Showing 25 changed files with 157 additions and 156 deletions.
4 changes: 4 additions & 0 deletions arch/arm64/include/asm/kvm_host.h
@@ -929,6 +929,10 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
(system_supports_mte() && \
test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0() \
(system_supports_32bit_el0() && \
!static_branch_unlikely(&arm64_mismatched_32bit_el0))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
6 changes: 4 additions & 2 deletions arch/arm64/include/uapi/asm/kvm.h
@@ -75,9 +75,11 @@ struct kvm_regs {

/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
KVM_ARM_DEVICE_ID_SHIFT)

/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
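The old mask was built from a signed int literal, so it sign-extends when widened to 64 bits; below is a minimal user-space sketch of the difference, using a local GENMASK64() as a stand-in for the kernel's GENMASK() (the macro name and printf harness are illustrative, not part of this commit).

/*
 * Sketch only: what the old and new mask definitions evaluate to when used
 * in a 64-bit context.  GENMASK64() is a local approximation of GENMASK().
 */
#include <stdio.h>

#define GENMASK64(h, l) \
	(((~0ULL) - (1ULL << (l)) + 1) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* 0xffff is a signed int; on common compilers the shift yields a
	 * negative int that sign-extends when widened to 64 bits. */
	unsigned long long old_mask = (0xffff << 16);
	/* Mask of bits 31..16 computed in an unsigned 64-bit type. */
	unsigned long long new_mask = GENMASK64(16 + 15, 16);

	printf("old: %#llx\n", old_mask); /* 0xffffffffffff0000 */
	printf("new: %#llx\n", new_mask); /* 0xffff0000 */
	return 0;
}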
3 changes: 1 addition & 2 deletions arch/arm64/kvm/arm.c
@@ -757,8 +757,7 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
if (likely(!vcpu_mode_is_32bit(vcpu)))
return false;

return !system_supports_32bit_el0() ||
static_branch_unlikely(&arm64_mismatched_32bit_el0);
return !kvm_supports_32bit_el0();
}

/**
2 changes: 1 addition & 1 deletion arch/arm64/kvm/guest.c
@@ -242,7 +242,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
if (!system_supports_32bit_el0())
if (!kvm_supports_32bit_el0())
return -EINVAL;
break;
case PSR_AA32_MODE_FIQ:
8 changes: 4 additions & 4 deletions arch/arm64/kvm/mmu.c
@@ -993,7 +993,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
* THP doesn't start to split while we are adjusting the
* refcounts.
*
* We are sure this doesn't happen, because mmu_notifier_retry
* We are sure this doesn't happen, because mmu_invalidate_retry
* was successful and we are holding the mmu_lock, so if this
* THP is trying to split, it will be blocked in the mmu
* notifier before touching any of the pages, specifically
@@ -1188,9 +1188,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret;
}

mmu_seq = vcpu->kvm->mmu_notifier_seq;
mmu_seq = vcpu->kvm->mmu_invalidate_seq;
/*
* Ensure the read of mmu_notifier_seq happens before we call
* Ensure the read of mmu_invalidate_seq happens before we call
* gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
* the page we just got a reference to gets unmapped before we have a
* chance to grab the mmu_lock, which ensure that if the page gets
@@ -1246,7 +1246,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
else
write_lock(&kvm->mmu_lock);
pgt = vcpu->arch.hw_mmu->pgt;
if (mmu_notifier_retry(kvm, mmu_seq))
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;

/*
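This hunk, like the MIPS, powerpc, and riscv hunks below, renames the sequence-count fields and helpers without changing their semantics: sample the invalidation generation, do the slow page lookup without mmu_lock, then retry under mmu_lock if the generation moved. A condensed user-space analogue follows (a sketch under assumed names, not kernel code; the kernel additionally orders the sample against the page-table reads with smp_rmb()).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong invalidate_seq;	/* bumped by "invalidation" events */

static int invalidate_retry(unsigned long seen)
{
	/* A racing invalidation bumped the counter after we sampled it. */
	return atomic_load(&invalidate_seq) != seen;
}

static int map_page(unsigned long gfn)
{
	unsigned long mmu_seq = atomic_load(&invalidate_seq);

	(void)gfn;			/* page lookup elided in this sketch */
	/* ...pin the backing page for gfn without holding mmu_lock... */

	pthread_mutex_lock(&mmu_lock);
	if (invalidate_retry(mmu_seq)) {
		pthread_mutex_unlock(&mmu_lock);
		return -1;		/* caller retries the fault */
	}
	/* ...install the mapping while still holding mmu_lock... */
	pthread_mutex_unlock(&mmu_lock);
	return 0;
}

int main(void)
{
	printf("map_page(0) -> %d\n", map_page(0));
	return 0;
}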
4 changes: 2 additions & 2 deletions arch/arm64/kvm/sys_regs.c
@@ -652,7 +652,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
*/
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
if (!system_supports_32bit_el0())
if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, r->reg) = val;
}
@@ -701,7 +701,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
if (!system_supports_32bit_el0())
if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
2 changes: 0 additions & 2 deletions arch/mips/include/asm/kvm_host.h
@@ -84,8 +84,6 @@


#define KVM_MAX_VCPUS 16
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0

#define KVM_HALT_POLL_NS_DEFAULT 500000

12 changes: 6 additions & 6 deletions arch/mips/kvm/mmu.c
@@ -615,17 +615,17 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
* Used to check for invalidations in progress, of the pfn that is
* returned by pfn_to_pfn_prot below.
*/
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
/*
* Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
* gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
* Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
* in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
* chance to grab the mmu_lock without mmu_notifier_retry() noticing.
* chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
* This smp_rmb() pairs with the effective smp_wmb() of the combination
* of the pte_unmap_unlock() after the PTE is zapped, and the
* spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
* mmu_notifier_seq is incremented.
* mmu_invalidate_seq is incremented.
*/
smp_rmb();

@@ -638,7 +638,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,

spin_lock(&kvm->mmu_lock);
/* Check if an invalidation has taken place since we got pfn */
if (mmu_notifier_retry(kvm, mmu_seq)) {
if (mmu_invalidate_retry(kvm, mmu_seq)) {
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/kvm_book3s_64.h
@@ -666,7 +666,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
VM_WARN(!spin_is_locked(&kvm->mmu_lock),
"%s called with kvm mmu_lock not held \n", __func__);

if (mmu_notifier_retry(kvm, mmu_seq))
if (mmu_invalidate_retry(kvm, mmu_seq))
return NULL;

pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
4 changes: 2 additions & 2 deletions arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -90,7 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
unsigned long pfn;

/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/* Get host physical address for gpa */
@@ -151,7 +151,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
cpte = kvmppc_mmu_hpte_cache_next(vcpu);

spin_lock(&kvm->mmu_lock);
if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
r = -EAGAIN;
goto out_unlock;
}
4 changes: 2 additions & 2 deletions arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -578,7 +578,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
return -EFAULT;

/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

ret = -EFAULT;
@@ -693,7 +693,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,

/* Check if we might have been invalidated; let the guest retry if so */
ret = RESUME_GUEST;
if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
unlock_rmap(rmap);
goto out_unlock;
}
6 changes: 3 additions & 3 deletions arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -640,7 +640,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
/* Check if we might have been invalidated; let the guest retry if so */
spin_lock(&kvm->mmu_lock);
ret = -EAGAIN;
if (mmu_notifier_retry(kvm, mmu_seq))
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;

/* Now traverse again under the lock and change the tree */
@@ -830,7 +830,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
bool large_enable;

/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/*
@@ -1191,7 +1191,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
* Increase the mmu notifier sequence number to prevent any page
* fault that read the memslot earlier from writing a PTE.
*/
kvm->mmu_notifier_seq++;
kvm->mmu_invalidate_seq++;
spin_unlock(&kvm->mmu_lock);
}

2 changes: 1 addition & 1 deletion arch/powerpc/kvm/book3s_hv_nested.c
@@ -1580,7 +1580,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
/* 2. Find the host pte for this L1 guest real address */

/* Used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/* See if can find translation in our partition scoped tables for L1 */
8 changes: 4 additions & 4 deletions arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
g_ptel = ptel;

/* used later to detect if we might have been invalidated */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/* Find the memslot (if any) for this address */
@@ -366,7 +366,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
rmap = real_vmalloc_addr(rmap);
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
if (mmu_notifier_retry(kvm, mmu_seq)) {
if (mmu_invalidate_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
@@ -932,7 +932,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
int i;

/* Used later to detect if we might have been invalidated */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
long ret = H_SUCCESS;

/* Used later to detect if we might have been invalidated */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
4 changes: 2 additions & 2 deletions arch/powerpc/kvm/e500_mmu_host.c
@@ -339,7 +339,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
unsigned long flags;

/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/*
@@ -460,7 +460,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}

spin_lock(&kvm->mmu_lock);
if (mmu_notifier_retry(kvm, mmu_seq)) {
if (mmu_invalidate_retry(kvm, mmu_seq)) {
ret = -EAGAIN;
goto out;
}
4 changes: 2 additions & 2 deletions arch/riscv/kvm/mmu.c
@@ -666,7 +666,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
return ret;
}

mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;

hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
@@ -686,7 +686,7 @@

spin_lock(&kvm->mmu_lock);

if (mmu_notifier_retry(kvm, mmu_seq))
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;

if (writable) {
11 changes: 11 additions & 0 deletions arch/x86/include/asm/ibt.h
@@ -31,6 +31,16 @@

#define __noendbr __attribute__((nocf_check))

/*
* Create a dummy function pointer reference to prevent objtool from marking
* the function as needing to be "sealed" (i.e. ENDBR converted to NOP by
* apply_ibt_endbr()).
*/
#define IBT_NOSEAL(fname) \
".pushsection .discard.ibt_endbr_noseal\n\t" \
_ASM_PTR fname "\n\t" \
".popsection\n\t"

static inline __attribute_const__ u32 gen_endbr(void)
{
u32 endbr;
@@ -84,6 +94,7 @@ extern __noendbr void ibt_restore(u64 save);
#ifndef __ASSEMBLY__

#define ASM_ENDBR
#define IBT_NOSEAL(name)

#define __noendbr

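IBT_NOSEAL() records the function's address in a discard section that objtool scans, so apply_ibt_endbr() leaves that function's ENDBR in place. A rough sketch of what the annotation boils down to on x86_64 follows, assuming _ASM_PTR expands to ".quad"; my_func is an illustrative placeholder, not from this commit.

/* Sketch only: approximately what IBT_NOSEAL("my_func") emits on x86_64,
 * assuming _ASM_PTR is ".quad".  objtool reads the pointers collected in
 * .discard.ibt_endbr_noseal and skips sealing the referenced ENDBRs. */
void my_func(void)
{
}

asm(".pushsection .discard.ibt_endbr_noseal\n\t"
    ".quad my_func\n\t"
    ".popsection\n\t");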
2 changes: 1 addition & 1 deletion arch/x86/include/asm/kvm_host.h
@@ -53,7 +53,7 @@
#define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)

/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_INTERNAL_MEM_SLOTS 3

#define KVM_HALT_POLL_NS_DEFAULT 200000

26 changes: 6 additions & 20 deletions arch/x86/kvm/emulate.c
@@ -326,7 +326,8 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
".align " __stringify(FASTOP_SIZE) " \n\t" \
".type " name ", @function \n\t" \
name ":\n\t" \
ASM_ENDBR
ASM_ENDBR \
IBT_NOSEAL(name)

#define FOP_FUNC(name) \
__FOP_FUNC(#name)
@@ -446,27 +447,12 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
FOP_END

/* Special case for SETcc - 1 instruction per cc */

/*
* Depending on .config the SETcc functions look like:
*
* ENDBR [4 bytes; CONFIG_X86_KERNEL_IBT]
* SETcc %al [3 bytes]
* RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK]
* INT3 [1 byte; CONFIG_SLS]
*/
#define SETCC_ALIGN 16

#define FOP_SETCC(op) \
".align " __stringify(SETCC_ALIGN) " \n\t" \
".type " #op ", @function \n\t" \
#op ": \n\t" \
ASM_ENDBR \
FOP_FUNC(op) \
#op " %al \n\t" \
__FOP_RET(#op) \
".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"
FOP_RET(op)

__FOP_START(setcc, SETCC_ALIGN)
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
@@ -1079,7 +1065,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf);
void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);

flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; " CALL_NOSPEC
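test_cc() finds the SETcc stub for a condition code purely by arithmetic, which is why the stubs must all share the FASTOP_SIZE stride. Below is a user-space sketch of that base-plus-stride lookup; the stride value and the table contents are illustrative assumptions, not the kernel's actual stub layout.

/*
 * Sketch only: locating the handler for condition code `cc` by arithmetic
 * on a table whose entries all share one stride, which is what aligning the
 * SETcc stubs to FASTOP_SIZE buys the emulator.
 */
#include <stdio.h>

#define FASTOP_SIZE 16			/* assumed stub stride */

/* Sixteen "stubs", each padded to exactly FASTOP_SIZE bytes. */
static const char setcc_table[16][FASTOP_SIZE] = {
	"seto", "setno", "setc", "setnc", "setz", "setnz", "setbe", "setnbe",
	"sets", "setns", "setp", "setnp", "setl", "setnl", "setle", "setnle",
};

static const char *setcc_stub(unsigned int condition)
{
	/* Mirrors: (void *)em_setcc + FASTOP_SIZE * (condition & 0xf) */
	return (const char *)setcc_table + FASTOP_SIZE * (condition & 0xf);
}

int main(void)
{
	printf("cc=0x4 -> %s\n", setcc_stub(0x4));	/* setz */
	printf("cc=0xf -> %s\n", setcc_stub(0xf));	/* setnle */
	return 0;
}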