KVM: selftests: Rename perf_test_util symbols to memstress
Replace the perf_test_ prefix on symbol names with memstress_ to match
the new file name.

"memstress" better describes the functionality provided by this library:
creating and running a VM that stresses VM memory by reading and writing
to guest memory on all vCPUs in parallel.

"memstress" also contains the same number of characters as "perf_test",
making it a drop-in replacement in symbols, e.g. function names, without
impacting line lengths. Also, the lack of an underscore between "mem"
and "stress" makes it clear that "memstress" is a noun.

Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221012165729.3505266-4-dmatlack@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
David Matlack authored and Sean Christopherson committed Nov 16, 2022
1 parent a008a33 commit 7812d80
Showing 7 changed files with 106 additions and 106 deletions.
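For reference, a minimal sketch of how the renamed API fits together, assembled from the memstress.h declarations and the call sites in the diffs below. The vcpu_worker body and the demo() harness are illustrative assumptions, not code from this commit:

#include "kvm_util.h"
#include "memstress.h"

/* Illustrative worker: each vCPU thread enters the guest once. */
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	vcpu_run(vcpu_args->vcpu);
}

static void demo(void)
{
	int nr_vcpus = 4;
	struct kvm_vm *vm;

	/*
	 * 1GiB per vCPU in a single test memslot, anonymous backing,
	 * with each vCPU confined to its own partition of guest memory.
	 */
	vm = memstress_create_vm(VM_MODE_DEFAULT, nr_vcpus,
				 DEFAULT_PER_VCPU_MEM_SIZE, 1,
				 VM_MEM_SRC_ANONYMOUS, true);

	/* Make every access a write so all guest memory is dirtied. */
	memstress_set_write_percent(vm, 100);

	/* One host thread per vCPU; join them, then tear down the VM. */
	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
	memstress_join_vcpu_threads(nr_vcpus);
	memstress_destroy_vm(vm);
}

The tests in this commit follow this same create/configure/start/join/destroy sequence; their workers additionally loop on vcpu_run() and validate the guest's exit reason.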
18 changes: 9 additions & 9 deletions tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -126,7 +126,7 @@ static void mark_page_idle(int page_idle_fd, uint64_t pfn)
 }
 
 static void mark_vcpu_memory_idle(struct kvm_vm *vm,
-				  struct perf_test_vcpu_args *vcpu_args)
+				  struct memstress_vcpu_args *vcpu_args)
 {
 	int vcpu_idx = vcpu_args->vcpu_idx;
 	uint64_t base_gva = vcpu_args->gva;
@@ -148,7 +148,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
 	TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
 
 	for (page = 0; page < pages; page++) {
-		uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
+		uint64_t gva = base_gva + page * memstress_args.guest_page_size;
 		uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
 
 		if (!pfn) {
@@ -220,10 +220,10 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
 	return true;
 }
 
-static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
 {
 	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
-	struct kvm_vm *vm = perf_test_args.vm;
+	struct kvm_vm *vm = memstress_args.vm;
 	int vcpu_idx = vcpu_args->vcpu_idx;
 	int current_iteration = 0;
 
@@ -279,7 +279,7 @@ static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *descripti
 static void access_memory(struct kvm_vm *vm, int nr_vcpus,
 			  enum access_type access, const char *description)
 {
-	perf_test_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
+	memstress_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
 	iteration_work = ITERATION_ACCESS_MEMORY;
 	run_iteration(vm, nr_vcpus, description);
 }
@@ -303,10 +303,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct kvm_vm *vm;
 	int nr_vcpus = params->nr_vcpus;
 
-	vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
+	vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
				 params->backing_src, !overlap_memory_access);
 
-	perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
+	memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
 
 	pr_info("\n");
 	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
@@ -324,8 +324,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	/* Set done to signal the vCPU threads to exit */
 	done = true;
 
-	perf_test_join_vcpu_threads(nr_vcpus);
-	perf_test_destroy_vm(vm);
+	memstress_join_vcpu_threads(nr_vcpus);
+	memstress_destroy_vm(vm);
 }
 
 static void help(char *name)
18 changes: 9 additions & 9 deletions tools/testing/selftests/kvm/demand_paging_test.c
@@ -42,7 +42,7 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 static size_t demand_paging_size;
 static char *guest_data_prototype;
 
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
 {
 	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
 	int vcpu_idx = vcpu_args->vcpu_idx;
@@ -285,7 +285,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct kvm_vm *vm;
 	int r, i;
 
-	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
+	vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
				 p->src_type, p->partition_vcpu_memory_access);
 
 	demand_paging_size = get_backing_src_pagesz(p->src_type);
@@ -307,11 +307,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
 
 	for (i = 0; i < nr_vcpus; i++) {
-		struct perf_test_vcpu_args *vcpu_args;
+		struct memstress_vcpu_args *vcpu_args;
 		void *vcpu_hva;
 		void *vcpu_alias;
 
-		vcpu_args = &perf_test_args.vcpu_args[i];
+		vcpu_args = &memstress_args.vcpu_args[i];
 
 		/* Cache the host addresses of the region */
 		vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
@@ -329,17 +329,17 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 					pipefds[i * 2], p->uffd_mode,
 					p->uffd_delay, &uffd_args[i],
 					vcpu_hva, vcpu_alias,
-					vcpu_args->pages * perf_test_args.guest_page_size);
+					vcpu_args->pages * memstress_args.guest_page_size);
 		}
 	}
 
 	pr_info("Finished creating vCPUs and starting uffd threads\n");
 
 	clock_gettime(CLOCK_MONOTONIC, &start);
-	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
 	pr_info("Started all vCPUs\n");
 
-	perf_test_join_vcpu_threads(nr_vcpus);
+	memstress_join_vcpu_threads(nr_vcpus);
 	ts_diff = timespec_elapsed(start);
 	pr_info("All vCPU threads joined\n");
 
@@ -358,10 +358,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	pr_info("Total guest execution time: %ld.%.9lds\n",
 		ts_diff.tv_sec, ts_diff.tv_nsec);
 	pr_info("Overall demand paging rate: %f pgs/sec\n",
-		perf_test_args.vcpu_args[0].pages * nr_vcpus /
+		memstress_args.vcpu_args[0].pages * nr_vcpus /
 		((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
 
-	perf_test_destroy_vm(vm);
+	memstress_destroy_vm(vm);
 
 	free(guest_data_prototype);
 	if (p->uffd_mode) {
34 changes: 17 additions & 17 deletions tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -67,7 +67,7 @@ static bool host_quit;
 static int iteration;
 static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
 {
 	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
 	int vcpu_idx = vcpu_args->vcpu_idx;
@@ -141,7 +141,7 @@ static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
 	int i;
 
 	for (i = 0; i < slots; i++) {
-		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
+		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
 		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
 
 		vm_mem_region_set_flags(vm, slot, flags);
@@ -163,7 +163,7 @@ static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots
 	int i;
 
 	for (i = 0; i < slots; i++) {
-		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
+		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
 
 		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
 	}
@@ -175,7 +175,7 @@ static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
 	int i;
 
 	for (i = 0; i < slots; i++) {
-		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
+		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
 
 		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
 	}
@@ -223,13 +223,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct timespec clear_dirty_log_total = (struct timespec){0};
 	int i;
 
-	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
+	vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);
 
 	pr_info("Random seed: %u\n", p->random_seed);
-	perf_test_set_random_seed(vm, p->random_seed);
-	perf_test_set_write_percent(vm, p->write_percent);
+	memstress_set_random_seed(vm, p->random_seed);
+	memstress_set_write_percent(vm, p->write_percent);
 
 	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
 	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
@@ -259,9 +259,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	 * occurring during the dirty memory iterations below, which
 	 * would pollute the performance results.
 	 */
-	perf_test_set_write_percent(vm, 100);
-	perf_test_set_random_access(vm, false);
-	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+	memstress_set_write_percent(vm, 100);
+	memstress_set_random_access(vm, false);
+	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
 
 	/* Allow the vCPUs to populate memory */
 	pr_debug("Starting iteration %d - Populating\n", iteration);
@@ -282,8 +282,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
 		ts_diff.tv_sec, ts_diff.tv_nsec);
 
-	perf_test_set_write_percent(vm, p->write_percent);
-	perf_test_set_random_access(vm, p->random_access);
+	memstress_set_write_percent(vm, p->write_percent);
+	memstress_set_random_access(vm, p->random_access);
 
 	while (iteration < p->iterations) {
 		/*
@@ -345,7 +345,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	 * wait for them to exit.
 	 */
 	host_quit = true;
-	perf_test_join_vcpu_threads(nr_vcpus);
+	memstress_join_vcpu_threads(nr_vcpus);
 
 	avg = timespec_div(get_dirty_log_total, p->iterations);
 	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
@@ -361,7 +361,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
 	free_bitmaps(bitmaps, p->slots);
 	arch_cleanup_vm(vm);
-	perf_test_destroy_vm(vm);
+	memstress_destroy_vm(vm);
 }
 
 static void help(char *name)
@@ -466,7 +466,7 @@ int main(int argc, char *argv[])
 		guest_modes_cmdline(optarg);
 		break;
 	case 'n':
-		perf_test_args.nested = true;
+		memstress_args.nested = true;
 		break;
 	case 'o':
 		p.partition_vcpu_memory_access = false;
@@ -500,9 +500,9 @@ int main(int argc, char *argv[])
 	}
 
 	if (pcpu_list) {
-		kvm_parse_vcpu_pinning(pcpu_list, perf_test_args.vcpu_to_pcpu,
+		kvm_parse_vcpu_pinning(pcpu_list, memstress_args.vcpu_to_pcpu,
				       nr_vcpus);
-		perf_test_args.pin_vcpus = true;
+		memstress_args.pin_vcpus = true;
	}
 
 	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");
30 changes: 15 additions & 15 deletions tools/testing/selftests/kvm/include/memstress.h
@@ -17,9 +17,9 @@
 
 #define DEFAULT_PER_VCPU_MEM_SIZE	(1 << 30) /* 1G */
 
-#define PERF_TEST_MEM_SLOT_INDEX	1
+#define MEMSTRESS_MEM_SLOT_INDEX	1
 
-struct perf_test_vcpu_args {
+struct memstress_vcpu_args {
 	uint64_t gpa;
 	uint64_t gva;
 	uint64_t pages;
@@ -29,7 +29,7 @@ struct perf_test_vcpu_args {
 	int vcpu_idx;
 };
 
-struct perf_test_args {
+struct memstress_args {
 	struct kvm_vm *vm;
 	/* The starting address and size of the guest test region. */
 	uint64_t gpa;
@@ -47,26 +47,26 @@ struct perf_test_args {
 	/* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
 	uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
 
-	struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
+	struct memstress_vcpu_args vcpu_args[KVM_MAX_VCPUS];
 };
 
-extern struct perf_test_args perf_test_args;
+extern struct memstress_args memstress_args;
 
-struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
+struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
				   uint64_t vcpu_memory_bytes, int slots,
				   enum vm_mem_backing_src_type backing_src,
				   bool partition_vcpu_memory_access);
-void perf_test_destroy_vm(struct kvm_vm *vm);
+void memstress_destroy_vm(struct kvm_vm *vm);
 
-void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
-void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
-void perf_test_set_random_access(struct kvm_vm *vm, bool random_access);
+void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
+void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
+void memstress_set_random_access(struct kvm_vm *vm, bool random_access);
 
-void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
-void perf_test_join_vcpu_threads(int vcpus);
-void perf_test_guest_code(uint32_t vcpu_id);
+void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
+void memstress_join_vcpu_threads(int vcpus);
+void memstress_guest_code(uint32_t vcpu_id);
 
-uint64_t perf_test_nested_pages(int nr_vcpus);
-void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
+uint64_t memstress_nested_pages(int nr_vcpus);
+void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
 
 #endif /* SELFTEST_KVM_MEMSTRESS_H */