arm64: mte: CPU feature detection and initial sysreg configuration
Add the cpufeature and hwcap entries to detect the presence of MTE. If
the feature is detected on the boot CPU, any secondary CPU that does not
support it will be parked.
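As a quick illustration (not part of the patch): once the hwcap is exported, userspace can probe it through the auxiliary vector. A minimal sketch, assuming glibc's getauxval() and the HWCAP2_MTE value that this patch adds to the uapi header:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_MTE
#define HWCAP2_MTE	(1 << 18)	/* value added to uapi/asm/hwcap.h by this patch */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* Set only when the kernel advertises KERNEL_HWCAP_MTE. */
	printf("MTE %s\n", (hwcap2 & HWCAP2_MTE) ? "detected" : "not detected");
	return 0;
}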

Add the minimum SCTLR_EL1 and HCR_EL2 bits for enabling MTE. The Normal
Tagged memory type is configured in MAIR_EL1 before the MMU is enabled
in order to avoid disrupting other CPUs in the CnP domain.
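The MAIR_EL1 part of this is a plain byte insertion: the Normal Tagged attribute encoding is written into the 8-bit field selected by the MT_NORMAL_TAGGED index. A rough C sketch of the bit manipulation performed by the bfi in __cpu_setup below; mair_set_attr() is a made-up helper, and the concrete index/attribute values come from the arm64 headers:

#include <stdint.h>

/* MAIR_ELx packs eight 8-bit attribute fields; index n occupies bits [8n+7:8n]. */
static uint64_t mair_set_attr(uint64_t mair, unsigned int idx, uint64_t attr)
{
	mair &= ~(UINT64_C(0xff) << (8 * idx));
	return mair | (attr << (8 * idx));
}

/* e.g. mair = mair_set_attr(mair, MT_NORMAL_TAGGED, MAIR_ATTR_NORMAL_TAGGED); */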

Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Co-developed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Suzuki K Poulose <Suzuki.Poulose@arm.com>
Vincenzo Frascino authored and Catalin Marinas committed Sep 3, 2020
1 parent 0178dc7 commit 3b714d2
Showing 9 changed files with 54 additions and 5 deletions.
3 changes: 2 additions & 1 deletion arch/arm64/include/asm/cpucaps.h
@@ -64,7 +64,8 @@
 #define ARM64_BTI 54
 #define ARM64_HAS_ARMv8_4_TTL 55
 #define ARM64_HAS_TLB_RANGE 56
+#define ARM64_MTE 57

-#define ARM64_NCAPS 57
+#define ARM64_NCAPS 58

 #endif /* __ASM_CPUCAPS_H */
6 changes: 6 additions & 0 deletions arch/arm64/include/asm/cpufeature.h
@@ -681,6 +681,12 @@ static __always_inline bool system_uses_irq_prio_masking(void)
 	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
 }

+static inline bool system_supports_mte(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_MTE) &&
+		cpus_have_const_cap(ARM64_MTE);
+}
+
 static inline bool system_has_prio_mask_debugging(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
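As a usage note (illustration only, not from this patch): callers are expected to gate MTE paths on this helper, so that with CONFIG_ARM64_MTE=n the IS_ENABLED() check folds to a constant and the guarded code is optimised away. A hypothetical caller, mte_do_housekeeping(), depending on <asm/cpufeature.h>:

static void mte_do_housekeeping(void)
{
	if (!system_supports_mte())
		return;	/* always taken when CONFIG_ARM64_MTE=n */

	/* MTE-specific work would go here. */
}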
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/hwcap.h
@@ -95,7 +95,7 @@
 #define KERNEL_HWCAP_DGH __khwcap2_feature(DGH)
 #define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
 #define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
-/* reserved for KERNEL_HWCAP_MTE __khwcap2_feature(MTE) */
+#define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)

 /*
  * This yields a mask that user programs can use to figure out what
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/kvm_arm.h
@@ -79,7 +79,7 @@
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 			 HCR_FMO | HCR_IMO | HCR_PTW )
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
-#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

 /* TCR_EL2 Registers bits */
1 change: 1 addition & 0 deletions arch/arm64/include/asm/sysreg.h
@@ -613,6 +613,7 @@
 	 SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
 	 SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
 	 SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
+	 SCTLR_ELx_ITFSB| SCTLR_ELx_ATA | SCTLR_EL1_ATA0 |\
 	 ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)

 /* MAIR_ELx memory attributes (used by Linux) */
2 changes: 1 addition & 1 deletion arch/arm64/include/uapi/asm/hwcap.h
@@ -74,6 +74,6 @@
 #define HWCAP2_DGH (1 << 15)
 #define HWCAP2_RNG (1 << 16)
 #define HWCAP2_BTI (1 << 17)
-/* reserved for HWCAP2_MTE (1 << 18) */
+#define HWCAP2_MTE (1 << 18)

 #endif /* _UAPI__ASM_HWCAP_H */
17 changes: 17 additions & 0 deletions arch/arm64/kernel/cpufeature.c
@@ -227,6 +227,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
@@ -2121,6 +2123,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 	},
 #endif
+#ifdef CONFIG_ARM64_MTE
+	{
+		.desc = "Memory Tagging Extension",
+		.capability = ARM64_MTE,
+		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64PFR1_EL1,
+		.field_pos = ID_AA64PFR1_MTE_SHIFT,
+		.min_field_value = ID_AA64PFR1_MTE,
+		.sign = FTR_UNSIGNED,
+	},
+#endif /* CONFIG_ARM64_MTE */
 	{},
 };

@@ -2237,6 +2251,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
 	HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
 #endif
+#ifdef CONFIG_ARM64_MTE
+	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
+#endif /* CONFIG_ARM64_MTE */
 	{},
 };

2 changes: 1 addition & 1 deletion arch/arm64/kernel/cpuinfo.c
@@ -93,7 +93,7 @@ static const char *const hwcap_str[] = {
 	"dgh",
 	"rng",
 	"bti",
-	/* reserved for "mte" */
+	"mte",
 	NULL
 };

24 changes: 24 additions & 0 deletions arch/arm64/mm/proc.S
@@ -18,6 +18,7 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
 #include <asm/smp.h>
+#include <asm/sysreg.h>

 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K
@@ -425,6 +426,29 @@ SYM_FUNC_START(__cpu_setup)
 	 * Memory region attributes
 	 */
 	mov_q	x5, MAIR_EL1_SET
+#ifdef CONFIG_ARM64_MTE
+	/*
+	 * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
+	 * (ID_AA64PFR1_EL1[11:8] > 1).
+	 */
+	mrs	x10, ID_AA64PFR1_EL1
+	ubfx	x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
+	cmp	x10, #ID_AA64PFR1_MTE
+	b.lt	1f
+
+	/* Normal Tagged memory type at the corresponding MAIR index */
+	mov	x10, #MAIR_ATTR_NORMAL_TAGGED
+	bfi	x5, x10, #(8 * MT_NORMAL_TAGGED), #8
+
+	/* initialize GCR_EL1: all non-zero tags excluded by default */
+	mov	x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
+	msr_s	SYS_GCR_EL1, x10
+
+	/* clear any pending tag check faults in TFSR*_EL1 */
+	msr_s	SYS_TFSR_EL1, xzr
+	msr_s	SYS_TFSRE0_EL1, xzr
+1:
+#endif
 	msr	mair_el1, x5
 	/*
 	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
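For clarity, the ID-register check performed by the assembly above (ID_AA64PFR1_EL1.MTE >= 2) could be expressed with existing cpufeature helpers roughly as follows. This is an illustrative sketch, not code from the patch, and cpu_local_has_mte() is a made-up name:

#include <asm/cpufeature.h>
#include <asm/sysreg.h>

static bool cpu_local_has_mte(void)
{
	u64 pfr1 = read_cpuid(ID_AA64PFR1_EL1);
	unsigned int mte = cpuid_feature_extract_unsigned_field(pfr1,
						ID_AA64PFR1_MTE_SHIFT);

	/*
	 * A field value of 1 only advertises the MTE instructions at EL0;
	 * tag storage and checking require ID_AA64PFR1_MTE (2) or higher.
	 */
	return mte >= ID_AA64PFR1_MTE;
}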
