diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index a9544561397d7..aea3ec657c3ff 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -711,15 +711,15 @@
 #define ID_AA64DFR0_EL1_TraceVer_SHIFT		4
 #define ID_AA64DFR0_EL1_DebugVer_SHIFT		0
 
-#define ID_AA64DFR0_EL1_PMUVer_8_0		0x1
-#define ID_AA64DFR0_EL1_PMUVer_8_1		0x4
-#define ID_AA64DFR0_EL1_PMUVer_8_4		0x5
-#define ID_AA64DFR0_EL1_PMUVer_8_5		0x6
-#define ID_AA64DFR0_EL1_PMUVer_8_7		0x7
+#define ID_AA64DFR0_EL1_PMUVer_IMP		0x1
+#define ID_AA64DFR0_EL1_PMUVer_V3P1		0x4
+#define ID_AA64DFR0_EL1_PMUVer_V3P4		0x5
+#define ID_AA64DFR0_EL1_PMUVer_V3P5		0x6
+#define ID_AA64DFR0_EL1_PMUVer_V3P7		0x7
 #define ID_AA64DFR0_EL1_PMUVer_IMP_DEF		0xf
 
-#define ID_AA64DFR0_EL1_PMSVer_8_2		0x1
-#define ID_AA64DFR0_EL1_PMSVer_8_3		0x2
+#define ID_AA64DFR0_EL1_PMSVer_IMP		0x1
+#define ID_AA64DFR0_EL1_PMSVer_V1P1		0x2
 
 #define ID_DFR0_PERFMON_SHIFT		24
 
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 8b878837d8f16..7b0643fe2f134 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
  */
 static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
 {
-	return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_5);
+	return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
 }
 
 static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
@@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info)
 			     pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 
 	/* store PMMIR_EL1 register for sysfs */
-	if (pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4 && (pmceid_raw[1] & BIT(31)))
+	if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
 		cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
 	else
 		cpu_pmu->reg_pmmir = 0;
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 7122b5387de6c..0003c7d37533a 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	pmuver = kvm->arch.arm_pmu->pmuver;
 
 	switch (pmuver) {
-	case ID_AA64DFR0_EL1_PMUVer_8_0:
+	case ID_AA64DFR0_EL1_PMUVer_IMP:
 		return GENMASK(9, 0);
-	case ID_AA64DFR0_EL1_PMUVer_8_1:
-	case ID_AA64DFR0_EL1_PMUVer_8_4:
-	case ID_AA64DFR0_EL1_PMUVer_8_5:
-	case ID_AA64DFR0_EL1_PMUVer_8_7:
+	case ID_AA64DFR0_EL1_PMUVer_V3P1:
+	case ID_AA64DFR0_EL1_PMUVer_V3P4:
+	case ID_AA64DFR0_EL1_PMUVer_V3P5:
+	case ID_AA64DFR0_EL1_PMUVer_V3P7:
 		return GENMASK(15, 0);
 	default:		/* Shouldn't be here, just for sanity */
 		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
@@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
 		 * as RAZ
 		 */
-		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4)
+		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d4546a5b0ea5b..2ef1121ab844d 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1115,7 +1115,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		/* Limit guests to PMUv3 for ARMv8.4 */
 		val = cpuid_feature_cap_perfmon_field(val,
 						      ID_AA64DFR0_EL1_PMUVer_SHIFT,
-						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_8_4 : 0);
+						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
 		/* Hide SPE from guests */
 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
 		break;
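
For context, the renamed constants keep the original numeric encodings of ID_AA64DFR0_EL1.PMUVer, so the ordered comparisons in the hunks above (such as the PMUv3.5 check in armv8pmu_has_long_event() and the PMUv3.4 check before reading PMMIR_EL1) behave exactly as before the rename. The following is a minimal standalone userspace sketch, not kernel code: the macro values are copied from the sysreg.h hunk, while the helper name and the small test harness are illustrative only.

/*
 * Standalone illustration that the renamed PMUVer encodings still order
 * correctly for ">=" feature checks. Values mirror the hunk above.
 */
#include <stdio.h>

#define ID_AA64DFR0_EL1_PMUVer_IMP	0x1	/* PMUv3 (Armv8.0) */
#define ID_AA64DFR0_EL1_PMUVer_V3P1	0x4	/* FEAT_PMUv3p1 */
#define ID_AA64DFR0_EL1_PMUVer_V3P4	0x5	/* FEAT_PMUv3p4 */
#define ID_AA64DFR0_EL1_PMUVer_V3P5	0x6	/* FEAT_PMUv3p5 */
#define ID_AA64DFR0_EL1_PMUVer_V3P7	0x7	/* FEAT_PMUv3p7 */
#define ID_AA64DFR0_EL1_PMUVer_IMP_DEF	0xf	/* IMPLEMENTATION DEFINED */

/*
 * Same shape as the PMUv3.5 long-counter check in the perf_event.c hunk;
 * assumes the IMP_DEF encoding has already been rejected, as the kernel's
 * probe path does before this comparison is reached.
 */
static int has_long_event(unsigned int pmuver)
{
	return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
}

int main(void)
{
	unsigned int vers[] = {
		ID_AA64DFR0_EL1_PMUVer_IMP,
		ID_AA64DFR0_EL1_PMUVer_V3P1,
		ID_AA64DFR0_EL1_PMUVer_V3P4,
		ID_AA64DFR0_EL1_PMUVer_V3P5,
		ID_AA64DFR0_EL1_PMUVer_V3P7,
	};

	for (unsigned int i = 0; i < sizeof(vers) / sizeof(vers[0]); i++)
		printf("PMUVer 0x%x: 64-bit event counters %s\n",
		       vers[i], has_long_event(vers[i]) ? "yes" : "no");

	return 0;
}

Note that the IMPLEMENTATION DEFINED encoding (0xf) compares greater than every architected version, which is why it has to be filtered out before such ">=" checks; the rename does not change that property.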