Merge tag 'kvmarm-fixes-6.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.16, take #2

- Rework of system register accessors for system registers that are
  directly written to memory, so that sanitisation of the in-memory
  value happens at the correct time (after the read, or before the
  write). For convenience, RMW-style accessors are also provided.

- Multiple fixes for the so-called "arch-timer-edge-cases" selftest,
  which was always broken.
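
As a concrete illustration, here is a minimal userspace mock of the
ordering problem the rework addresses — not kernel code; the register,
mask, and helper names are invented stand-ins. The old accessor
sanitised on the read and handed back a pointer, so a read-modify-write
stored the modified value back unsanitised; the new write-side helpers
mask the value just before the store:

    /* gcc -o rmw-demo rmw-demo.c (uses GNU statement expressions) */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mem_reg;            /* in-memory register copy */
    #define VALID_MASK 0x0fULL          /* pretend bits 63:4 are RES0 */

    static uint64_t apply_masks(uint64_t v) { return v & VALID_MASK; }

    /* Old style: sanitise the stored value, then hand back a pointer. */
    #define OLD_SYS_REG() (*({ mem_reg = apply_masks(mem_reg); &mem_reg; }))

    /* New style: sanitise just before the value is written back. */
    #define ASSIGN_SYS_REG(val) do { mem_reg = apply_masks(val); } while (0)
    #define RMW_SYS_REG(op, val) do { uint64_t __v = mem_reg; \
            __v op (val); mem_reg = apply_masks(__v); } while (0)

    int main(void)
    {
            mem_reg = 0;
            OLD_SYS_REG() |= 0xf0;      /* RES0 bits survive the write */
            printf("old RMW: %#llx\n", (unsigned long long)mem_reg);

            mem_reg = 0;
            RMW_SYS_REG(|=, 0xf0);      /* RES0 bits are masked off */
            printf("new RMW: %#llx\n", (unsigned long long)mem_reg);
            return 0;
    }

Built with gcc, this prints "old RMW: 0xf0" then "new RMW: 0" — the
same in-memory register, with sanitisation moved from read time to
write time.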
Paolo Bonzini committed Jun 11, 2025
2 parents 19272b3 + fad4cf9 commit ce360c2
Showing 16 changed files with 151 additions and 116 deletions.
32 changes: 27 additions & 5 deletions arch/arm64/include/asm/kvm_host.h
@@ -1107,14 +1107,36 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
+
+#define __vcpu_assign_sys_reg(v, r, val) \
+        do { \
+                const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
+                u64 __v = (val); \
+                if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
+                        __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+                \
+                ctxt_sys_reg(ctxt, (r)) = __v; \
+        } while (0)
+
+#define __vcpu_rmw_sys_reg(v, r, op, val) \
+        do { \
+                const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
+                u64 __v = ctxt_sys_reg(ctxt, (r)); \
+                __v op (val); \
+                if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
+                        __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+                \
+                ctxt_sys_reg(ctxt, (r)) = __v; \
+        } while (0)
+
#define __vcpu_sys_reg(v,r) \
-        (*({ \
+        ({ \
                const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
-                u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
+                u64 __v = ctxt_sys_reg(ctxt, (r)); \
                if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
-                        *__r = kvm_vcpu_apply_reg_masks((v), (r), *__r); \
-                __r; \
-        }))
+                        __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+                __v; \
+        })

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
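
Taken together, the call sites in the rest of this merge migrate to one
of three patterns (the examples below are lifted from the hunks that
follow), plus a raw accessor for the few users that need a stable
pointer now that __vcpu_sys_reg() yields a value rather than an lvalue:

    /* Read: value sanitised after the load. */
    u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);

    /* Write: value sanitised before the store. */
    __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);

    /* Read-modify-write: 'op' is a compound-assignment operator, and
     * the result is sanitised before being stored back. */
    __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));

    /* Address-taking users switch to the raw accessor. */
    offs->vcpu_offset = __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2);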
18 changes: 9 additions & 9 deletions arch/arm64/kvm/arch_timer.c
@@ -108,16 +108,16 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)

        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:
-                __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+                __vcpu_assign_sys_reg(vcpu, CNTV_CTL_EL0, ctl);
                break;
        case TIMER_PTIMER:
-                __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+                __vcpu_assign_sys_reg(vcpu, CNTP_CTL_EL0, ctl);
                break;
        case TIMER_HVTIMER:
-                __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
+                __vcpu_assign_sys_reg(vcpu, CNTHV_CTL_EL2, ctl);
                break;
        case TIMER_HPTIMER:
-                __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
+                __vcpu_assign_sys_reg(vcpu, CNTHP_CTL_EL2, ctl);
                break;
        default:
                WARN_ON(1);
@@ -130,16 +130,16 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)

        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:
-                __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+                __vcpu_assign_sys_reg(vcpu, CNTV_CVAL_EL0, cval);
                break;
        case TIMER_PTIMER:
-                __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+                __vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, cval);
                break;
        case TIMER_HVTIMER:
-                __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
+                __vcpu_assign_sys_reg(vcpu, CNTHV_CVAL_EL2, cval);
                break;
        case TIMER_HPTIMER:
-                __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
+                __vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, cval);
                break;
        default:
                WARN_ON(1);
@@ -1036,7 +1036,7 @@ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
        if (vcpu_has_nv(vcpu)) {
                struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;

-                offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+                offs->vcpu_offset = __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2);
                offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
        }

4 changes: 2 additions & 2 deletions arch/arm64/kvm/debug.c
@@ -216,9 +216,9 @@ void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
{
        if (val & OSLAR_EL1_OSLK)
-                __vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;
+                __vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
        else
-                __vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK;
+                __vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);

        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
4 changes: 2 additions & 2 deletions arch/arm64/kvm/fpsimd.c
@@ -103,8 +103,8 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
        fp_state.sve_state = vcpu->arch.sve_state;
        fp_state.sve_vl = vcpu->arch.sve_max_vl;
        fp_state.sme_state = NULL;
-        fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR);
-        fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
+        fp_state.svcr = __ctxt_sys_reg(&vcpu->arch.ctxt, SVCR);
+        fp_state.fpmr = __ctxt_sys_reg(&vcpu->arch.ctxt, FPMR);
        fp_state.fp_type = &vcpu->arch.fp_type;

        if (vcpu_has_sve(vcpu))
4 changes: 2 additions & 2 deletions arch/arm64/kvm/hyp/exception.c
@@ -37,7 +37,7 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
        if (unlikely(vcpu_has_nv(vcpu)))
                vcpu_write_sys_reg(vcpu, val, reg);
        else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
-                __vcpu_sys_reg(vcpu, reg) = val;
+                __vcpu_assign_sys_reg(vcpu, reg, val);
}

static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
@@ -51,7 +51,7 @@ static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
        } else if (has_vhe()) {
                write_sysreg_el1(val, SYS_SPSR);
        } else {
-                __vcpu_sys_reg(vcpu, SPSR_EL1) = val;
+                __vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
        }
}

4 changes: 2 additions & 2 deletions arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -45,7 +45,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
        if (!vcpu_el1_is_32bit(vcpu))
                return;

-        __vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
+        __vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
@@ -456,7 +456,7 @@ static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
         */
        if (vcpu_has_sve(vcpu)) {
                zcr_el1 = read_sysreg_el1(SYS_ZCR);
-                __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
+                __vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);

                /*
                 * The guest's state is always saved using the guest's max VL.
6 changes: 3 additions & 3 deletions arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -307,11 +307,11 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
        vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
        vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);

-        __vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
-        __vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
+        __vcpu_assign_sys_reg(vcpu, DACR32_EL2, read_sysreg(dacr32_el2));
+        __vcpu_assign_sys_reg(vcpu, IFSR32_EL2, read_sysreg(ifsr32_el2));

        if (has_vhe() || kvm_debug_regs_in_use(vcpu))
-                __vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
+                __vcpu_assign_sys_reg(vcpu, DBGVCR32_EL2, read_sysreg(dbgvcr32_el2));
}

static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
4 changes: 2 additions & 2 deletions arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -26,7 +26,7 @@ void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
{
-        __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+        __vcpu_assign_sys_reg(vcpu, ZCR_EL1, read_sysreg_el1(SYS_ZCR));
        /*
         * On saving/restoring guest sve state, always use the maximum VL for
         * the guest. The layout of the data when saving the sve state depends
@@ -79,7 +79,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)

        has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
        if (has_fpmr)
-                __vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
+                __vcpu_assign_sys_reg(vcpu, FPMR, read_sysreg_s(SYS_FPMR));

        if (system_supports_sve())
                __hyp_sve_restore_host();
4 changes: 2 additions & 2 deletions arch/arm64/kvm/hyp/vhe/switch.c
@@ -223,9 +223,9 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
         */
        val = read_sysreg_el0(SYS_CNTP_CVAL);
        if (map.direct_ptimer == vcpu_ptimer(vcpu))
-                __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
+                __vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, val);
        if (map.direct_ptimer == vcpu_hptimer(vcpu))
-                __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
+                __vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, val);

        offset = read_sysreg_s(SYS_CNTPOFF_EL2);

48 changes: 24 additions & 24 deletions arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -18,17 +18,17 @@
static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
{
        /* These registers are common with EL1 */
-        __vcpu_sys_reg(vcpu, PAR_EL1) = read_sysreg(par_el1);
-        __vcpu_sys_reg(vcpu, TPIDR_EL1) = read_sysreg(tpidr_el1);
-
-        __vcpu_sys_reg(vcpu, ESR_EL2) = read_sysreg_el1(SYS_ESR);
-        __vcpu_sys_reg(vcpu, AFSR0_EL2) = read_sysreg_el1(SYS_AFSR0);
-        __vcpu_sys_reg(vcpu, AFSR1_EL2) = read_sysreg_el1(SYS_AFSR1);
-        __vcpu_sys_reg(vcpu, FAR_EL2) = read_sysreg_el1(SYS_FAR);
-        __vcpu_sys_reg(vcpu, MAIR_EL2) = read_sysreg_el1(SYS_MAIR);
-        __vcpu_sys_reg(vcpu, VBAR_EL2) = read_sysreg_el1(SYS_VBAR);
-        __vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR);
-        __vcpu_sys_reg(vcpu, AMAIR_EL2) = read_sysreg_el1(SYS_AMAIR);
+        __vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
+        __vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));
+
+        __vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
+        __vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
+        __vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
+        __vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
+        __vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
+        __vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
+        __vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
+        __vcpu_assign_sys_reg(vcpu, AMAIR_EL2, read_sysreg_el1(SYS_AMAIR));

        /*
         * In VHE mode those registers are compatible between EL1 and EL2,
@@ -46,21 +46,21 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
         * are always trapped, ensuring that the in-memory
         * copy is always up-to-date. A small blessing...
         */
-        __vcpu_sys_reg(vcpu, SCTLR_EL2) = read_sysreg_el1(SYS_SCTLR);
-        __vcpu_sys_reg(vcpu, TTBR0_EL2) = read_sysreg_el1(SYS_TTBR0);
-        __vcpu_sys_reg(vcpu, TTBR1_EL2) = read_sysreg_el1(SYS_TTBR1);
-        __vcpu_sys_reg(vcpu, TCR_EL2) = read_sysreg_el1(SYS_TCR);
+        __vcpu_assign_sys_reg(vcpu, SCTLR_EL2, read_sysreg_el1(SYS_SCTLR));
+        __vcpu_assign_sys_reg(vcpu, TTBR0_EL2, read_sysreg_el1(SYS_TTBR0));
+        __vcpu_assign_sys_reg(vcpu, TTBR1_EL2, read_sysreg_el1(SYS_TTBR1));
+        __vcpu_assign_sys_reg(vcpu, TCR_EL2, read_sysreg_el1(SYS_TCR));

        if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
-                __vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2);
+                __vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2));

                if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
-                        __vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0);
-                        __vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR);
+                        __vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0));
+                        __vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR));
                }

                if (ctxt_has_s1poe(&vcpu->arch.ctxt))
-                        __vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR);
+                        __vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR));
        }

        /*
@@ -70,13 +70,13 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
         */
        val = read_sysreg_el1(SYS_CNTKCTL);
        val &= CNTKCTL_VALID_BITS;
-        __vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
-        __vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
+        __vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
+        __vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
        }

-        __vcpu_sys_reg(vcpu, SP_EL2) = read_sysreg(sp_el1);
-        __vcpu_sys_reg(vcpu, ELR_EL2) = read_sysreg_el1(SYS_ELR);
-        __vcpu_sys_reg(vcpu, SPSR_EL2) = read_sysreg_el1(SYS_SPSR);
+        __vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
+        __vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR));
+        __vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR));
}

static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
2 changes: 1 addition & 1 deletion arch/arm64/kvm/nested.c
@@ -1757,7 +1757,7 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)

out:
        for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
-                (void)__vcpu_sys_reg(vcpu, sr);
+                __vcpu_rmw_sys_reg(vcpu, sr, |=, 0);

        return 0;
}
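
The old loop body relied on the read's side effect: dereferencing the
pointer returned by __vcpu_sys_reg() sanitised the value in place. With
the accessor now returning a plain value, the no-op RMW keeps that
effect explicit. For a register in the sanitised range of an NV vcpu,
__vcpu_rmw_sys_reg(vcpu, sr, |=, 0) expands to roughly the following (a
sketch of the macro added in kvm_host.h above):

    u64 __v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
    __v |= 0;                                       /* no-op modify */
    __v = kvm_vcpu_apply_reg_masks(vcpu, sr, __v);  /* sanitise */
    ctxt_sys_reg(&vcpu->arch.ctxt, sr) = __v;       /* write back */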
24 changes: 12 additions & 12 deletions arch/arm64/kvm/pmu-emul.c
@@ -178,7 +178,7 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
                val |= lower_32_bits(val);
        }

-        __vcpu_sys_reg(vcpu, reg) = val;
+        __vcpu_assign_sys_reg(vcpu, reg, val);

        /* Recreate the perf event to reflect the updated sample_period */
        kvm_pmu_create_perf_event(pmc);
@@ -204,7 +204,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
-        __vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
+        __vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val);
        kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
}

@@ -239,7 +239,7 @@ static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)

        reg = counter_index_to_reg(pmc->idx);

-        __vcpu_sys_reg(vcpu, reg) = val;
+        __vcpu_assign_sys_reg(vcpu, reg, val);

        kvm_pmu_release_perf_event(pmc);
}
@@ -503,14 +503,14 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
                reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
                if (!kvm_pmc_is_64bit(pmc))
                        reg = lower_32_bits(reg);
-                __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
+                __vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);

                /* No overflow? move on */
                if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
                        continue;

                /* Mark overflow */
-                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+                __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));

                if (kvm_pmu_counter_can_chain(pmc))
                        kvm_pmu_counter_increment(vcpu, BIT(i + 1),
@@ -556,7 +556,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

-        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+        __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));

        if (kvm_pmu_counter_can_chain(pmc))
                kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
@@ -602,7 +602,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
        kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

        /* The reset bits don't indicate any state, and shouldn't be saved. */
-        __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
+        __vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P)));

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
@@ -779,7 +779,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
        u64 reg;

        reg = counter_index_to_evtreg(pmc->idx);
-        __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
+        __vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm)));

        kvm_pmu_create_perf_event(pmc);
}
@@ -914,9 +914,9 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
        u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

-        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
-        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
-        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+        __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
+        __vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
+        __vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);

        kvm_pmu_reprogram_counter_mask(vcpu, mask);
}
@@ -1038,7 +1038,7 @@ static void kvm_arm_set_nr_counters(struct kvm *kvm, unsigned int nr)
                        u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
                        val &= ~MDCR_EL2_HPMN;
                        val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
-                        __vcpu_sys_reg(vcpu, MDCR_EL2) = val;
+                        __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
                }
        }
}