KVM: arm64: PMU: Assume PMU presence in pmu-emul.c
Many functions in pmu-emul.c check kvm_vcpu_has_pmu(vcpu). A favorable
interpretation is defensive programming, but it also has downsides:

- It is confusing, as it implies these functions may be called without a
  PMU, although most of them are called only when a PMU is present.

- It makes the semantics of these functions fuzzy. For example, calling
  kvm_pmu_disable_counter_mask() without a PMU may result in a no-op as
  there are no enabled counters, but it is unclear what
  kvm_pmu_get_counter_value() returns when there is no PMU.

- It lets callers skip checking kvm_vcpu_has_pmu(vcpu) themselves, even
  though it is often wrong to call these functions without a PMU.

- Duplicating kvm_vcpu_has_pmu(vcpu) checks across multiple functions is
  error-prone. Many of these functions are called for system register
  accesses, and the system register infrastructure already employs
  comprehensive, less error-prone checks.

Check kvm_vcpu_has_pmu(vcpu) in the callers of these functions instead,
and remove the obsolete checks from pmu-emul.c. The only exceptions are
the functions that implement ioctls, as they have well-defined semantics
even when the PMU is not present.

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250315-pmc-v5-2-ecee87dab216@daynix.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Akihiko Odaki authored and Oliver Upton committed Mar 17, 2025
1 parent f2aeb7b commit be5ccac
Showing 4 changed files with 20 additions and 35 deletions.
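
For context, the pattern this commit applies can be sketched as follows. This is an illustrative sketch based on the kvm_pmu_nested_transition() hunks below, with the function body elided; it is not the exact kernel code.

	/* Before: the helper defends itself against a missing PMU. */
	void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
	{
		if (!kvm_vcpu_has_pmu(vcpu))
			return;
		/* ... reprogram the enabled counters ... */
	}

	/* After: the helper assumes a PMU is present ... */
	void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
	{
		/* ... reprogram the enabled counters ... */
	}

	/* ... and each caller checks once, e.g. in kvm_emulate_nested_eret(): */
	if (kvm_vcpu_has_pmu(vcpu))
		kvm_pmu_nested_transition(vcpu);
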
17 changes: 11 additions & 6 deletions arch/arm64/kvm/arm.c
@@ -835,9 +835,11 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
-	ret = kvm_arm_pmu_v3_enable(vcpu);
-	if (ret)
-		return ret;
+	if (kvm_vcpu_has_pmu(vcpu)) {
+		ret = kvm_arm_pmu_v3_enable(vcpu);
+		if (ret)
+			return ret;
+	}
 
 	if (is_protected_kvm_enabled()) {
 		ret = pkvm_create_hyp_vm(kvm);
@@ -1148,7 +1150,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 */
 		preempt_disable();
 
-		kvm_pmu_flush_hwstate(vcpu);
+		if (kvm_vcpu_has_pmu(vcpu))
+			kvm_pmu_flush_hwstate(vcpu);
 
 		local_irq_disable();
 
@@ -1167,7 +1170,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			isb(); /* Ensure work in x_flush_hwstate is committed */
-			kvm_pmu_sync_hwstate(vcpu);
+			if (kvm_vcpu_has_pmu(vcpu))
+				kvm_pmu_sync_hwstate(vcpu);
 			if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
 				kvm_timer_sync_user(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
@@ -1197,7 +1201,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 * that the vgic can properly sample the updated state of the
 		 * interrupt line.
 		 */
-		kvm_pmu_sync_hwstate(vcpu);
+		if (kvm_vcpu_has_pmu(vcpu))
+			kvm_pmu_sync_hwstate(vcpu);
 
 		/*
 		 * Sync the vgic state before syncing the timer state because
6 changes: 4 additions & 2 deletions arch/arm64/kvm/emulate-nested.c
@@ -2516,7 +2516,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_load(vcpu, smp_processor_id());
 	preempt_enable();
 
-	kvm_pmu_nested_transition(vcpu);
+	if (kvm_vcpu_has_pmu(vcpu))
+		kvm_pmu_nested_transition(vcpu);
 }
 
 static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2599,7 +2600,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
 	kvm_arch_vcpu_load(vcpu, smp_processor_id());
 	preempt_enable();
 
-	kvm_pmu_nested_transition(vcpu);
+	if (kvm_vcpu_has_pmu(vcpu))
+		kvm_pmu_nested_transition(vcpu);
 
 	return 1;
 }
26 changes: 1 addition & 25 deletions arch/arm64/kvm/pmu-emul.c
@@ -150,9 +150,6 @@ static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
  */
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 {
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return 0;
-
 	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
 }
 
@@ -191,9 +188,6 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
  */
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
 }
 
@@ -350,7 +344,7 @@ void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
-	if (!kvm_vcpu_has_pmu(vcpu) || !val)
+	if (!val)
 		return;
 
 	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -401,9 +395,6 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	bool overflow;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	overflow = kvm_pmu_overflow_status(vcpu);
 	if (pmu->irq_level == overflow)
 		return;
@@ -599,9 +590,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
 	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
 		val &= ~ARMV8_PMU_PMCR_LP;
@@ -766,9 +754,6 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
 	u64 reg;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	reg = counter_index_to_evtreg(pmc->idx);
 	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
 
@@ -848,9 +833,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	u64 val, mask = 0;
 	int base, i, nr_events;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return 0;
-
 	if (!pmceid1) {
 		val = read_sysreg(pmceid0_el0);
 		/* always support CHAIN */
@@ -900,9 +882,6 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)

 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return 0;
-
 	if (!vcpu->arch.pmu.created)
 		return -EINVAL;
 
@@ -1231,9 +1210,6 @@ void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
 	unsigned long mask;
 	int i;
 
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
 	mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 	for_each_set_bit(i, &mask, 32) {
 		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
6 changes: 4 additions & 2 deletions arch/arm64/kvm/sys_regs.c
@@ -1853,12 +1853,14 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
 				      const struct sys_reg_desc *rd)
 {
-	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+	u8 perfmon;
 	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
 
 	val &= ~ID_DFR0_EL1_PerfMon_MASK;
-	if (kvm_vcpu_has_pmu(vcpu))
+	if (kvm_vcpu_has_pmu(vcpu)) {
+		perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
 		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
+	}
 
 	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
 
