-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
RISC-V: KVM: Add skeleton support for perf
This patch only adds the barebones structure of the perf implementation. Most of the functions return zero at this point and will be implemented fully in the future. Reviewed-by: Anup Patel <anup@brainfault.org> Reviewed-by: Andrew Jones <ajones@ventanamicro.com> Signed-off-by: Atish Patra <atishp@rivosinc.com> Signed-off-by: Anup Patel <anup@brainfault.org>
- Loading branch information
Atish Patra
authored and
Anup Patel
committed
Feb 7, 2023
1 parent
bae0dfd
commit 8f0153e
Showing
5 changed files
with
227 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,74 @@ | ||
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#ifndef __KVM_VCPU_RISCV_PMU_H
#define __KVM_VCPU_RISCV_PMU_H

#include <linux/perf/riscv_pmu.h>
#include <asm/sbi.h>

#ifdef CONFIG_RISCV_PMU_SBI
/*
 * Fixed upper bounds for the virtual counters exposed to a guest:
 * up to 32 hardware counters plus up to 32 firmware counters.
 */
#define RISCV_KVM_MAX_FW_CTRS 32
#define RISCV_KVM_MAX_HW_CTRS 32
#define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
/* pmc_in_use below must fit the counter space; keep the total within 64 bits */
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);

/* Per virtual pmu counter data */
struct kvm_pmc {
	/* Virtual counter index within kvm_pmu::pmc[] */
	u8 idx;
	/* Backing host perf event, if one has been created for this counter */
	struct perf_event *perf_event;
	/* Last known counter value presented to the guest */
	u64 counter_val;
	/* SBI counter info (type/width/CSR) reported to the guest */
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
};

/* PMU data structure per vcpu */
struct kvm_pmu {
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	/* Number of the virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of the virtual hardware counters available */
	int num_hw_ctrs;
	/* A flag to indicate that pmu initialization is done */
	bool init_done;
	/* Bit map of all the virtual counter used */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
};

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu_context))

/*
 * SBI PMU extension handlers. Results and SBI error codes are returned to
 * the guest through @retdata; the C return value is for KVM-internal errors.
 */
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);

#else
/* Empty stub so struct kvm_vcpu_arch keeps the same member either way */
struct kvm_pmu {
};

static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
#endif /* CONFIG_RISCV_PMU_SBI */
#endif /* !__KVM_VCPU_RISCV_PMU_H */
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,141 @@ | ||
// SPDX-License-Identifier: GPL-2.0 | ||
/* | ||
* Copyright (c) 2023 Rivos Inc | ||
* | ||
* Authors: | ||
* Atish Patra <atishp@rivosinc.com> | ||
*/ | ||
|
||
#define pr_fmt(fmt) "riscv-kvm-pmu: " fmt | ||
#include <linux/errno.h> | ||
#include <linux/err.h> | ||
#include <linux/kvm_host.h> | ||
#include <linux/perf/riscv_pmu.h> | ||
#include <asm/csr.h> | ||
#include <asm/kvm_vcpu_sbi.h> | ||
#include <asm/kvm_vcpu_pmu.h> | ||
|
||
#define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs) | ||
|
||
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, | ||
struct kvm_vcpu_sbi_return *retdata) | ||
{ | ||
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); | ||
|
||
retdata->out_val = kvm_pmu_num_counters(kvpmu); | ||
|
||
return 0; | ||
} | ||
|
||
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx, | ||
struct kvm_vcpu_sbi_return *retdata) | ||
{ | ||
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); | ||
|
||
if (cidx > RISCV_KVM_MAX_COUNTERS || cidx == 1) { | ||
retdata->err_val = SBI_ERR_INVALID_PARAM; | ||
return 0; | ||
} | ||
|
||
retdata->out_val = kvpmu->pmc[cidx].cinfo.value; | ||
|
||
return 0; | ||
} | ||
|
||
/*
 * SBI PMU counter_start call: start the counters selected by
 * @ctr_base/@ctr_mask, optionally loading @ival per @flags.
 * Skeleton only; always succeeds without doing anything for now.
 */
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata)
{
	/* TODO */
	return 0;
}
|
||
/*
 * SBI PMU counter_stop call: stop the counters selected by
 * @ctr_base/@ctr_mask according to @flags.
 * Skeleton only; always succeeds without doing anything for now.
 */
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata)
{
	/* TODO */
	return 0;
}
|
||
/*
 * SBI PMU counter_config_matching call: find/configure a counter in
 * @ctr_base/@ctr_mask for event @eidx (with raw event data @evtdata).
 * Skeleton only; always succeeds without doing anything for now.
 */
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata)
{
	/* TODO */
	return 0;
}
|
||
/*
 * SBI PMU counter_fw_read call: return the current value of virtual
 * counter @cidx through @retdata.
 * Skeleton only; always succeeds without doing anything for now.
 */
int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata)
{
	/* TODO */
	return 0;
}
|
||
/*
 * Initialize the per-vCPU virtual PMU: query the host PMU driver for the
 * hardware counter count/width, then populate the SBI counter-info entry
 * for every virtual counter. On any host query failure the PMU is simply
 * left uninitialized (init_done stays false).
 */
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
{
	int i = 0, ret, num_hw_ctrs = 0, hpm_width = 0;
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/* Host-side HW counter geometry; bail out if perf can't tell us */
	ret = riscv_pmu_get_hpm_info(&hpm_width, &num_hw_ctrs);
	if (ret < 0 || !hpm_width || !num_hw_ctrs)
		return;

	/*
	 * Increase the number of hardware counters to offset the time counter.
	 */
	kvpmu->num_hw_ctrs = num_hw_ctrs + 1;
	kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX;

	/* Clamp to the fixed pmc[] array bound declared in the header */
	if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) {
		pr_warn_once("Limiting the hardware counters to 32 as specified by the ISA");
		kvpmu->num_hw_ctrs = RISCV_KVM_MAX_HW_CTRS;
	}

	/*
	 * There is no correlation between the logical hardware counter and virtual counters.
	 * However, we need to encode a hpmcounter CSR in the counter info field so that
	 * KVM can trap n emulate the read. This works well in the migration use case as
	 * KVM doesn't care if the actual hpmcounter is available in the hardware or not.
	 */
	for (i = 0; i < kvm_pmu_num_counters(kvpmu); i++) {
		/* TIME CSR shouldn't be read from perf interface */
		if (i == 1)
			continue;
		pmc = &kvpmu->pmc[i];
		pmc->idx = i;
		if (i < kvpmu->num_hw_ctrs) {
			pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
			if (i < 3)
				/* CY, IR counters */
				pmc->cinfo.width = 63;
			else
				pmc->cinfo.width = hpm_width;
			/*
			 * The CSR number doesn't have any relation with the logical
			 * hardware counters. The CSR numbers are encoded sequentially
			 * to avoid maintaining a map between the virtual counter
			 * and CSR number.
			 */
			pmc->cinfo.csr = CSR_CYCLE + i;
		} else {
			/* Remaining slots are firmware counters, full XLEN wide */
			pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW;
			pmc->cinfo.width = BITS_PER_LONG - 1;
		}
	}

	kvpmu->init_done = true;
}
|
||
/*
 * Tear down the per-vCPU virtual PMU state.
 * Skeleton only; the actual release of perf events comes later.
 */
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu)
{
	/* TODO */
}
|
||
/* Reset the vCPU's PMU; currently identical to a full deinit */
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_pmu_deinit(vcpu);
}