Skip to content

Commit

Permalink
Merge tag 'kvm-x86-fixes-6.4' of https://github.com/kvm-x86/linux into HEAD
Browse files Browse the repository at this point in the history

KVM x86 fixes for 6.4

 - Fix a memslot lookup bug in the NX recovery thread that could
   theoretically let userspace bypass the NX hugepage mitigation

 - Fix a s/BLOCKING/PENDING bug in SVM's vNMI support

 - Account exit stats for fastpath VM-Exits that never leave the super
   tight run-loop

 - Fix an out-of-bounds bug in the optimized APIC map code, and add a
   regression test for the race.
  • Loading branch information
Paolo Bonzini committed Jun 3, 2023
2 parents 49661a5 + 47d2804 commit f211b45
Show file tree
Hide file tree
Showing 6 changed files with 101 additions and 4 deletions.
20 changes: 18 additions & 2 deletions arch/x86/kvm/lapic.c
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,23 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
u32 xapic_id = kvm_xapic_id(apic);
u32 physical_id;

/*
* For simplicity, KVM always allocates enough space for all possible
* xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on
* without the optimized map.
*/
if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
return -EINVAL;

/*
* Bail if a vCPU was added and/or enabled its APIC between allocating
* the map and doing the actual calculations for the map. Note, KVM
* hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
* the compiler decides to reload x2apic_id after this check.
*/
if (x2apic_id > new->max_apic_id)
return -E2BIG;

/*
* Deliberately truncate the vCPU ID when detecting a mismatched APIC
* ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
Expand All @@ -253,8 +270,7 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
*/
if (vcpu->kvm->arch.x2apic_format) {
/* See also kvm_apic_match_physical_addr(). */
if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
x2apic_id <= new->max_apic_id)
if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
new->phys_map[x2apic_id] = apic;

if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
Expand Down
5 changes: 4 additions & 1 deletion arch/x86/kvm/mmu/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -7091,7 +7091,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
*/
slot = NULL;
if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
slot = gfn_to_memslot(kvm, sp->gfn);
struct kvm_memslots *slots;

slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, sp->gfn);
WARN_ON_ONCE(!slot);
}

Expand Down
2 changes: 1 addition & 1 deletion arch/x86/kvm/svm/svm.c
Original file line number Diff line number Diff line change
Expand Up @@ -3510,7 +3510,7 @@ static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
if (!is_vnmi_enabled(svm))
return false;

return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK);
return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
}

static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
Expand Down
3 changes: 3 additions & 0 deletions arch/x86/kvm/x86.c
Original file line number Diff line number Diff line change
Expand Up @@ -10758,6 +10758,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
break;
}

/* Note, VM-Exits that go down the "slow" path are accounted below. */
++vcpu->stat.exits;
}

/*
Expand Down
1 change: 1 addition & 0 deletions tools/testing/selftests/kvm/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += x86_64/amx_test
TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
TEST_GEN_PROGS_x86_64 += x86_64/recalc_apic_map_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
Expand Down
74 changes: 74 additions & 0 deletions tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test edge cases and race conditions in kvm_recalculate_apic_map().
*/

#include <sys/ioctl.h>
#include <pthread.h>
#include <time.h>

#include "processor.h"
#include "test_util.h"
#include "kvm_util.h"
#include "apic.h"

#define TIMEOUT 5 /* seconds */

#define LAPIC_DISABLED 0
#define LAPIC_X2APIC (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)
#define MAX_XAPIC_ID 0xff

static void *race(void *arg)
{
struct kvm_lapic_state lapic = {};
struct kvm_vcpu *vcpu = arg;

while (1) {
/* Trigger kvm_recalculate_apic_map(). */
vcpu_ioctl(vcpu, KVM_SET_LAPIC, &lapic);
pthread_testcancel();
}

return NULL;
}

int main(void)
{
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
struct kvm_vcpu *vcpuN;
struct kvm_vm *vm;
pthread_t thread;
time_t t;
int i;

kvm_static_assert(KVM_MAX_VCPUS > MAX_XAPIC_ID);

/*
* Create the max number of vCPUs supported by selftests so that KVM
* has decent amount of work to do when recalculating the map, i.e. to
* make the problematic window large enough to hit.
*/
vm = vm_create_with_vcpus(KVM_MAX_VCPUS, NULL, vcpus);

/*
* Enable x2APIC on all vCPUs so that KVM doesn't bail from the recalc
* due to vCPUs having aliased xAPIC IDs (truncated to 8 bits).
*/
for (i = 0; i < KVM_MAX_VCPUS; i++)
vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC);

ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);

vcpuN = vcpus[KVM_MAX_VCPUS - 1];
for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_X2APIC);
vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_DISABLED);
}

ASSERT_EQ(pthread_cancel(thread), 0);
ASSERT_EQ(pthread_join(thread, NULL), 0);

kvm_vm_free(vm);

return 0;
}

0 comments on commit f211b45

Please sign in to comment.