Skip to content

Commit

Permalink
KVM: selftests: Automatically do init_ucall() for non-barebones VMs
Browse files Browse the repository at this point in the history
Do init_ucall() automatically during VM creation to kill two (three?)
birds with one stone.

First, initializing ucall immediately after VM creations allows forcing
aarch64's MMIO ucall address to immediately follow memslot0.  This is
still somewhat fragile as tests could clobber the MMIO address with a
new memslot, but it's safe-ish since tests have to be conservative when
accounting for memslot0.  And this can be hardened in the future by
creating a read-only memslot for the MMIO page (KVM ARM exits with MMIO
if the guest writes to a read-only memslot).  Add a TODO to document that
selftests can and should use a memslot for the ucall MMIO (doing so
requires yet more rework because tests assume they can use all memslots
except memslot0).

Second, initializing ucall for all VMs prepares for making ucall
initialization meaningful on all architectures.  aarch64 is currently the
only arch that needs to do any setup, but that will change in the future
by switching to a pool-based implementation (instead of the current
stack-based approach).

Lastly, defining the ucall MMIO address from common code will simplify
switching all architectures (except s390) to a common MMIO-based ucall
implementation (if there's ever sufficient motivation to do so).

Cc: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Andrew Jones <andrew.jones@linux.dev>
Tested-by: Peter Gonda <pgonda@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221006003409.649993-4-seanjc@google.com
  • Loading branch information
Sean Christopherson committed Nov 17, 2022
1 parent ef38871 commit dc88244
Show file tree
Hide file tree
Showing 20 changed files with 20 additions and 76 deletions.
2 changes: 0 additions & 2 deletions tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
Original file line number Diff line number Diff line change
Expand Up @@ -158,8 +158,6 @@ int main(void)

TEST_REQUIRE(vcpu_aarch64_only(vcpu));

ucall_init(vm, NULL);

test_user_raz_wi(vcpu);
test_user_raz_invariant(vcpu);
test_guest_raz(vcpu);
Expand Down
1 change: 0 additions & 1 deletion tools/testing/selftests/kvm/aarch64/arch_timer.c
Original file line number Diff line number Diff line change
Expand Up @@ -375,7 +375,6 @@ static struct kvm_vm *test_vm_create(void)
for (i = 0; i < nr_vcpus; i++)
vcpu_init_descriptor_tables(vcpus[i]);

ucall_init(vm, NULL);
test_init_timer_irq(vm);
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
Expand Down
2 changes: 0 additions & 2 deletions tools/testing/selftests/kvm/aarch64/debug-exceptions.c
Original file line number Diff line number Diff line change
Expand Up @@ -292,7 +292,6 @@ static void test_guest_debug_exceptions(void)
int stage;

vm = vm_create_with_one_vcpu(&vcpu, guest_code);
ucall_init(vm, NULL);

vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
Expand Down Expand Up @@ -343,7 +342,6 @@ void test_single_step_from_userspace(int test_cnt)
struct kvm_guest_debug debug = {};

vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss);
ucall_init(vm, NULL);
run = vcpu->run;
vcpu_args_set(vcpu, 1, test_cnt);

Expand Down
1 change: 0 additions & 1 deletion tools/testing/selftests/kvm/aarch64/hypercalls.c
Original file line number Diff line number Diff line change
Expand Up @@ -236,7 +236,6 @@ static struct kvm_vm *test_vm_create(struct kvm_vcpu **vcpu)

vm = vm_create_with_one_vcpu(vcpu, guest_code);

ucall_init(vm, NULL);
steal_time_init(*vcpu);

return vm;
Expand Down
1 change: 0 additions & 1 deletion tools/testing/selftests/kvm/aarch64/psci_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,6 @@ static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
struct kvm_vm *vm;

vm = vm_create(2);
ucall_init(vm, NULL);

vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
Expand Down
2 changes: 0 additions & 2 deletions tools/testing/selftests/kvm/aarch64/vgic_init.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,6 @@ static void guest_code(void)
/* we don't want to assert on run execution, hence that helper */
static int run_vcpu(struct kvm_vcpu *vcpu)
{
ucall_init(vcpu->vm, NULL);

return __vcpu_run(vcpu) ? -errno : 0;
}

Expand Down
1 change: 0 additions & 1 deletion tools/testing/selftests/kvm/aarch64/vgic_irq.c
Original file line number Diff line number Diff line change
Expand Up @@ -756,7 +756,6 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
print_args(&args);

vm = vm_create_with_one_vcpu(&vcpu, guest_code);
ucall_init(vm, NULL);

vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
Expand Down
2 changes: 0 additions & 2 deletions tools/testing/selftests/kvm/dirty_log_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -756,8 +756,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

ucall_init(vm, NULL);

/* Export the shared variables to the guest */
sync_global_to_guest(vm, host_page_size);
sync_global_to_guest(vm, guest_page_size);
Expand Down
6 changes: 3 additions & 3 deletions tools/testing/selftests/kvm/include/ucall_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,17 +24,17 @@ struct ucall {
uint64_t args[UCALL_MAX_ARGS];
};

void ucall_arch_init(struct kvm_vm *vm, void *arg);
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
void ucall_arch_uninit(struct kvm_vm *vm);
void ucall_arch_do_ucall(vm_vaddr_t uc);
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);

void ucall(uint64_t cmd, int nargs, ...);
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);

static inline void ucall_init(struct kvm_vm *vm, void *arg)
static inline void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
ucall_arch_init(vm, arg);
ucall_arch_init(vm, mmio_gpa);
}

static inline void ucall_uninit(struct kvm_vm *vm)
Expand Down
1 change: 0 additions & 1 deletion tools/testing/selftests/kvm/kvm_page_table_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -289,7 +289,6 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

/* Export shared structure test_args to guest */
ucall_init(vm, NULL);
sync_global_to_guest(vm, test_args);

ret = sem_init(&test_stage_updated, 0, 0);
Expand Down
54 changes: 3 additions & 51 deletions tools/testing/selftests/kvm/lib/aarch64/ucall.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,60 +8,12 @@

static vm_vaddr_t *ucall_exit_mmio_addr;

static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
return false;
virt_pg_map(vm, mmio_gpa, mmio_gpa);

virt_pg_map(vm, gpa, gpa);

ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
ucall_exit_mmio_addr = (vm_vaddr_t *)mmio_gpa;
sync_global_to_guest(vm, ucall_exit_mmio_addr);

return true;
}

void ucall_arch_init(struct kvm_vm *vm, void *arg)
{
vm_paddr_t gpa, start, end, step, offset;
unsigned int bits;
bool ret;

if (arg) {
gpa = (vm_paddr_t)arg;
ret = ucall_mmio_init(vm, gpa);
TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
return;
}

/*
* Find an address within the allowed physical and virtual address
* spaces, that does _not_ have a KVM memory region associated with
* it. Identity mapping an address like this allows the guest to
* access it, but as KVM doesn't know what to do with it, it
* will assume it's something userspace handles and exit with
* KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
* Here we start with a guess that the addresses around 5/8th
* of the allowed space are unmapped and then work both down and
* up from there in 1/16th allowed space sized steps.
*
* Note, we need to use VA-bits - 1 when calculating the allowed
* virtual address space for an identity mapping because the upper
* half of the virtual address space is the two's complement of the
* lower and won't match physical addresses.
*/
bits = vm->va_bits - 1;
bits = min(vm->pa_bits, bits);
end = 1ul << bits;
start = end * 5 / 8;
step = end / 16;
for (offset = 0; offset < end - start; offset += step) {
if (ucall_mmio_init(vm, start - offset))
return;
if (ucall_mmio_init(vm, start + offset))
return;
}
TEST_FAIL("Can't find a ucall mmio address");
}

void ucall_arch_uninit(struct kvm_vm *vm)
Expand Down
11 changes: 11 additions & 0 deletions tools/testing/selftests/kvm/lib/kvm_util.c
Original file line number Diff line number Diff line change
Expand Up @@ -335,15 +335,26 @@ struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
{
uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
nr_extra_pages);
struct userspace_mem_region *slot0;
struct kvm_vm *vm;

vm = ____vm_create(mode, nr_pages);

kvm_vm_elf_load(vm, program_invocation_name);

/*
* TODO: Add proper defines to protect the library's memslots, and then
* carve out memslot1 for the ucall MMIO address. KVM treats writes to
* read-only memslots as MMIO, and creating a read-only memslot for the
* MMIO region would prevent silently clobbering the MMIO region.
*/
slot0 = memslot2region(vm, 0);
ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);

#ifdef __x86_64__
vm_create_irqchip(vm);
#endif

return vm;
}

Expand Down
2 changes: 0 additions & 2 deletions tools/testing/selftests/kvm/lib/memstress.c
Original file line number Diff line number Diff line change
Expand Up @@ -221,8 +221,6 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
memstress_setup_nested(vm, nr_vcpus, vcpus);
}

ucall_init(vm, NULL);

/* Export the shared variables to the guest. */
sync_global_to_guest(vm, memstress_args);

Expand Down
2 changes: 1 addition & 1 deletion tools/testing/selftests/kvm/lib/riscv/ucall.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
#include "kvm_util.h"
#include "processor.h"

void ucall_arch_init(struct kvm_vm *vm, void *arg)
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
}

Expand Down
2 changes: 1 addition & 1 deletion tools/testing/selftests/kvm/lib/s390x/ucall.c
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
*/
#include "kvm_util.h"

void ucall_arch_init(struct kvm_vm *vm, void *arg)
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
}

Expand Down
2 changes: 1 addition & 1 deletion tools/testing/selftests/kvm/lib/x86_64/ucall.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

#define UCALL_PIO_PORT ((uint16_t)0x1000)

void ucall_arch_init(struct kvm_vm *vm, void *arg)
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
}

Expand Down
1 change: 0 additions & 1 deletion tools/testing/selftests/kvm/memslot_perf_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,6 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
TEST_ASSERT(data->hva_slots, "malloc() fail");

data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
ucall_init(data->vm, NULL);

pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
max_mem_slots - 1, data->pages_per_slot, rempages);
Expand Down
1 change: 0 additions & 1 deletion tools/testing/selftests/kvm/rseq_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,6 @@ int main(int argc, char *argv[])
* CPU affinity.
*/
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
ucall_init(vm, NULL);

pthread_create(&migration_thread, NULL, migration_worker,
(void *)(unsigned long)syscall(SYS_gettid));
Expand Down
1 change: 0 additions & 1 deletion tools/testing/selftests/kvm/steal_time.c
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,6 @@ int main(int ac, char **av)
gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
ucall_init(vm, NULL);

TEST_REQUIRE(is_steal_time_supported(vcpus[0]));

Expand Down
1 change: 0 additions & 1 deletion tools/testing/selftests/kvm/system_counter_offset_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,6 @@ int main(void)

vm = vm_create_with_one_vcpu(&vcpu, guest_main);
check_preconditions(vcpu);
ucall_init(vm, NULL);

enter_guest(vcpu);
kvm_vm_free(vm);
Expand Down

0 comments on commit dc88244

Please sign in to comment.