Merge branch 'for-next/kernel-ptrauth' into for-next/core
* for-next/kernel-ptrauth:
  : Return address signing - in-kernel support
  arm64: Kconfig: verify binutils support for ARM64_PTR_AUTH
  lkdtm: arm64: test kernel pointer authentication
  arm64: compile the kernel with ptrauth return address signing
  kconfig: Add support for 'as-option'
  arm64: suspend: restore the kernel ptrauth keys
  arm64: __show_regs: strip PAC from lr in printk
  arm64: unwind: strip PAC from kernel addresses
  arm64: mask PAC bits of __builtin_return_address
  arm64: initialize ptrauth keys for kernel booting task
  arm64: initialize and switch ptrauth kernel keys
  arm64: enable ptrauth earlier
  arm64: cpufeature: handle conflicts based on capability
  arm64: cpufeature: Move cpu capability helpers inside C file
  arm64: ptrauth: Add bootup/runtime flags for __cpu_setup
  arm64: install user ptrauth keys at kernel exit time
  arm64: rename ptrauth key structures to be user-specific
  arm64: cpufeature: add pointer auth meta-capabilities
  arm64: cpufeature: Fix meta-capability cpufeature check
Catalin Marinas committed Mar 25, 2020
2 parents 806dc82 + 3b446c7 commit 44ca0e0
Showing 26 changed files with 427 additions and 100 deletions.
35 changes: 34 additions & 1 deletion arch/arm64/Kconfig
@@ -118,6 +118,7 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
@@ -1501,23 +1502,55 @@ config ARM64_PTR_AUTH
bool "Enable support for pointer authentication"
default y
depends on !KVM || ARM64_VHE
depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE)
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
Pointer authentication (part of the ARMv8.3 Extensions) provides
instructions for signing and authenticating pointers against secret
keys, which can be used to mitigate Return Oriented Programming (ROP)
and other attacks.

This option enables these instructions at EL0 (i.e. for userspace).

Choosing this option will cause the kernel to initialise secret keys
for each process at exec() time, with these keys being
context-switched along with the process.

If the compiler supports the -mbranch-protection or
-msign-return-address flag (e.g. GCC 7 or later), then this option
will also cause the kernel itself to be compiled with return address
protection. In this case, and if the target hardware is known to
support pointer authentication, then CONFIG_STACKPROTECTOR can be
disabled with minimal loss of protection.

The feature is detected at runtime. If it is not present in hardware, it
will not be advertised to userspace or to KVM guests, nor will it be
enabled. Note that KVM guests additionally require VHE mode, and hence
CONFIG_ARM64_VHE=y, to use this feature.

If the feature is present on the boot CPU but not on a late CPU, then the
late CPU will be parked. Conversely, if the boot CPU does not have address
auth and a late CPU does, the late CPU will still boot, but with the
feature disabled. On such a mixed system, this option should not be
selected.

This feature works with FUNCTION_GRAPH_TRACER option only if
DYNAMIC_FTRACE_WITH_REGS is enabled.

config CC_HAS_BRANCH_PROT_PAC_RET
# GCC 9 or later, clang 8 or later
def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)

config CC_HAS_SIGN_RETURN_ADDRESS
# GCC 7, 8
def_bool $(cc-option,-msign-return-address=all)

config AS_HAS_PAC
def_bool $(as-option,-Wa$(comma)-march=armv8.3-a)

config AS_HAS_CFI_NEGATE_RA_STATE
def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)

endmenu

menu "ARMv8.4 architectural features"
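For readers unfamiliar with what the CC_HAS_* options above gate: a hedged illustration, not part of this commit, of what a pac-ret-capable compiler (GCC 9+/Clang 8+) typically emits for return-address signing. The function names are made up and the exact code varies by compiler version:

/* demo.c -- compile with: gcc -O2 -mbranch-protection=pac-ret+leaf -S demo.c */
extern void helper(void);

void demo(void)
{
	helper();
	/*
	 * Typical generated code, roughly:
	 *
	 *   demo:
	 *       paciasp                    // sign LR with APIAKey, SP as modifier
	 *       stp  x29, x30, [sp, #-16]!
	 *       bl   helper
	 *       ldp  x29, x30, [sp], #16
	 *       autiasp                    // authenticate LR; a corrupted PAC
	 *       ret                        //   leaves LR non-canonical, so ret faults
	 */
}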
11 changes: 11 additions & 0 deletions arch/arm64/Makefile
@@ -65,6 +65,17 @@ stack_protector_prepare: prepare0
include/generated/asm-offsets.h))
endif

ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
# -march=armv8.3-a enables the non-NOP-space PAC instructions. To prevent
# the compiler from generating them (which would break the single-image
# contract), we pass the flag only to the assembler. It is needed only for
# non-integrated assemblers.
branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
KBUILD_CFLAGS += $(branch-prot-flags-y)
endif

ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
65 changes: 65 additions & 0 deletions arch/arm64/include/asm/asm_pointer_auth.h
@@ -0,0 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_POINTER_AUTH_H
#define __ASM_ASM_POINTER_AUTH_H

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_PTR_AUTH
/*
* The offsets of thread.keys_user.ap* from tsk exceed the #imm offset
* range of ldp, so compute the address of thread.keys_user first and use
* the ap* fields as immediate offsets from that base.
*/
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
mov \tmp1, #THREAD_KEYS_USER
add \tmp1, \tsk, \tmp1
alternative_if_not ARM64_HAS_ADDRESS_AUTH
b .Laddr_auth_skip_\@
alternative_else_nop_endif
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
msr_s SYS_APIAKEYLO_EL1, \tmp2
msr_s SYS_APIAKEYHI_EL1, \tmp3
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
msr_s SYS_APIBKEYLO_EL1, \tmp2
msr_s SYS_APIBKEYHI_EL1, \tmp3
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
msr_s SYS_APDAKEYLO_EL1, \tmp2
msr_s SYS_APDAKEYHI_EL1, \tmp3
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
msr_s SYS_APDBKEYLO_EL1, \tmp2
msr_s SYS_APDBKEYHI_EL1, \tmp3
.Laddr_auth_skip_\@:
alternative_if ARM64_HAS_GENERIC_AUTH
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
msr_s SYS_APGAKEYLO_EL1, \tmp2
msr_s SYS_APGAKEYHI_EL1, \tmp3
alternative_else_nop_endif
.endm

.macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
alternative_if ARM64_HAS_ADDRESS_AUTH
mov \tmp1, #THREAD_KEYS_KERNEL
add \tmp1, \tsk, \tmp1
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA]
msr_s SYS_APIAKEYLO_EL1, \tmp2
msr_s SYS_APIAKEYHI_EL1, \tmp3
.if \sync == 1
isb
.endif
alternative_else_nop_endif
.endm

#else /* CONFIG_ARM64_PTR_AUTH */

.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
.endm

.macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
.endm

#endif /* CONFIG_ARM64_PTR_AUTH */

#endif /* __ASM_ASM_POINTER_AUTH_H */
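The THREAD_KEYS_* and PTRAUTH_*_KEY_* immediates used by the macros above are asm-offsets constants. The asm-offsets.c side of this commit is not shown in this excerpt; a minimal sketch of how such constants are typically generated, assuming the usual kbuild pattern:

/* sketch of the asm-offsets entries the macros above rely on */
#include <linux/kbuild.h>
#include <linux/sched.h>
#include <asm/pointer_auth.h>

int main(void)
{
	DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
	DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
	DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
	DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
	return 0;
}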
24 changes: 24 additions & 0 deletions arch/arm64/include/asm/compiler.h
@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_COMPILER_H
#define __ASM_COMPILER_H

#if defined(CONFIG_ARM64_PTR_AUTH)

/*
* The EL0/EL1 pointer bits used by a pointer authentication code.
* This depends on TBI0/TBI1 being enabled; otherwise bits 63:56 would also
* be part of the PAC.
*/
#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual)
#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual)

/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
#define ptrauth_clear_pac(ptr) \
((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) : \
(ptr & ~ptrauth_user_pac_mask()))

#define __builtin_return_address(val) \
(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))

#endif /* CONFIG_ARM64_PTR_AUTH */

#endif /* __ASM_COMPILER_H */
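A self-contained, hedged user-space rendering of the same mask arithmetic, assuming 48-bit virtual addresses (i.e. vabits_actual == 48), showing how ptrauth_clear_pac() restores a canonical pointer:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)  (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
#define VA_BITS            48	/* stand-in for vabits_actual */
#define USER_PAC_MASK      GENMASK_ULL(54, VA_BITS)
#define KERNEL_PAC_MASK    GENMASK_ULL(63, VA_BITS)

static uint64_t clear_pac(uint64_t ptr)
{
	/* bit 55 distinguishes TTBR1 (kernel) from TTBR0 (user) pointers */
	return (ptr & (1ULL << 55)) ? (ptr | KERNEL_PAC_MASK)
				    : (ptr & ~USER_PAC_MASK);
}

int main(void)
{
	/* kernel pointer: PAC sits in bits 63:48, bit 55 still set */
	printf("%llx\n", (unsigned long long)clear_pac(0xab80000010081234ULL));
	/* -> ffff000010081234: upper bits forced back to all-ones */

	/* user pointer: PAC in bits 54:48 is simply cleared */
	printf("%llx\n", (unsigned long long)clear_pac(0x0041000000401234ULL));
	/* -> 0000000000401234 */
	return 0;
}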
4 changes: 3 additions & 1 deletion arch/arm64/include/asm/cpucaps.h
@@ -59,7 +59,9 @@
#define ARM64_HAS_E0PD 49
#define ARM64_HAS_RNG 50
#define ARM64_HAS_AMU_EXTN 51
#define ARM64_HAS_ADDRESS_AUTH 52
#define ARM64_HAS_GENERIC_AUTH 53

#define ARM64_NCAPS 52
#define ARM64_NCAPS 54

#endif /* __ASM_CPUCAPS_H */
39 changes: 21 additions & 18 deletions arch/arm64/include/asm/cpufeature.h
@@ -208,6 +208,10 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
* In some non-typical cases either both (a) and (b), or neither,
* should be permitted. This can be described by including neither
* or both flags in the capability's type field.
*
* In case of a conflict, the CPU is prevented from booting. If the
* ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
* then a kernel panic is triggered.
*/


@@ -240,6 +244,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when system has it */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT ((u16)BIT(6))

/*
* CPU errata workarounds that need to be enabled at boot time if one or
@@ -279,9 +285,20 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

/*
* CPU feature used early in the boot based on the boot CPU. All secondary
* CPUs must match the state of the capability as detected by the boot CPU.
* CPUs must match the state of the capability as detected by the boot CPU. In
* case of a conflict, a kernel panic is triggered.
*/
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE \
(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)

/*
* CPU feature used early in the boot based on the boot CPU. It is safe for a
* late CPU to have this feature even though the boot CPU hasn't enabled it,
* although the feature will not be used by Linux in this case. If the boot CPU
* has enabled this feature already, then every late CPU must have it.
*/
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
#define ARM64_CPUCAP_BOOT_CPU_FEATURE \
(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

struct arm64_cpu_capabilities {
const char *desc;
@@ -340,18 +357,6 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}

static inline bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
{
return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
}

static inline bool
cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
{
return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
}

/*
* Generic helper for handling capabilities with multiple (match,enable) pairs
* of callbacks, sharing the same capability bit.
@@ -654,15 +659,13 @@ static inline bool system_supports_cnp(void)
static inline bool system_supports_address_auth(void)
{
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
(cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
}

static inline bool system_supports_generic_auth(void)
{
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
(cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}

static inline bool system_uses_irq_prio_masking(void)
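The ARM64_HAS_ADDRESS_AUTH capability checked above is a meta-capability, set when either the architected or the IMP DEF flavour of address auth is present (per the "add pointer auth meta-capabilities" patch in this series). The cpufeature.c side is not shown in this excerpt; a hedged sketch of how such an entry might be wired up, using the __system_matches_cap() helper added elsewhere in this series (exact names assumed):

static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	/* matches if either underlying address-auth capability is set */
	return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
	       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
}

/* sketch of an arm64_features[] entry using the new boot-CPU scope */
static const struct arm64_cpu_capabilities ptr_auth_meta_cap = {
	.capability = ARM64_HAS_ADDRESS_AUTH,
	.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
	.matches = has_address_auth,
};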
50 changes: 22 additions & 28 deletions arch/arm64/include/asm/pointer_auth.h
@@ -22,15 +22,19 @@ struct ptrauth_key {
* We give each process its own keys, which are shared by all threads. The keys
* are inherited upon fork(), and reinitialised upon exec*().
*/
struct ptrauth_keys {
struct ptrauth_keys_user {
struct ptrauth_key apia;
struct ptrauth_key apib;
struct ptrauth_key apda;
struct ptrauth_key apdb;
struct ptrauth_key apga;
};

static inline void ptrauth_keys_init(struct ptrauth_keys *keys)
struct ptrauth_keys_kernel {
struct ptrauth_key apia;
};

static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
{
if (system_supports_address_auth()) {
get_random_bytes(&keys->apia, sizeof(keys->apia));
@@ -50,48 +54,38 @@ do { \
write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
} while (0)

static inline void ptrauth_keys_switch(struct ptrauth_keys *keys)
static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
{
if (system_supports_address_auth()) {
__ptrauth_key_install(APIA, keys->apia);
__ptrauth_key_install(APIB, keys->apib);
__ptrauth_key_install(APDA, keys->apda);
__ptrauth_key_install(APDB, keys->apdb);
}
if (system_supports_address_auth())
get_random_bytes(&keys->apia, sizeof(keys->apia));
}

if (system_supports_generic_auth())
__ptrauth_key_install(APGA, keys->apga);
static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
{
if (system_supports_address_auth())
__ptrauth_key_install(APIA, keys->apia);
}

extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);

/*
* The EL0 pointer bits used by a pointer authentication code.
* This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
*/
#define ptrauth_user_pac_mask() GENMASK(54, vabits_actual)

/* Only valid for EL0 TTBR0 instruction pointers */
static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
{
return ptr & ~ptrauth_user_pac_mask();
return ptrauth_clear_pac(ptr);
}

#define ptrauth_thread_init_user(tsk) \
do { \
struct task_struct *__ptiu_tsk = (tsk); \
ptrauth_keys_init(&__ptiu_tsk->thread.keys_user); \
ptrauth_keys_switch(&__ptiu_tsk->thread.keys_user); \
} while (0)

#define ptrauth_thread_switch(tsk) \
ptrauth_keys_switch(&(tsk)->thread.keys_user)
ptrauth_keys_init_user(&(tsk)->thread.keys_user)
#define ptrauth_thread_init_kernel(tsk) \
ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
#define ptrauth_thread_switch_kernel(tsk) \
ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)

#else /* CONFIG_ARM64_PTR_AUTH */
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_thread_init_user(tsk)
#define ptrauth_thread_switch(tsk)
#define ptrauth_thread_init_kernel(tsk)
#define ptrauth_thread_switch_kernel(tsk)
#endif /* CONFIG_ARM64_PTR_AUTH */

#endif /* __ASM_POINTER_AUTH_H */
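A usage note on the split above: only the APIA key is managed for the kernel, so switching it reduces to two system-register writes. A hedged sketch of the resume path (cf. the "suspend: restore the kernel ptrauth keys" patch in this series; the function name here is made up):

/* sketch: reinstall the kernel APIA key when a CPU returns from suspend */
static void sketch_cpu_resume(struct task_struct *tsk)
{
	ptrauth_thread_switch_kernel(tsk);
	/*
	 * Via __ptrauth_key_install(APIA, ...) this expands to roughly:
	 *   write_sysreg_s(keys->apia.lo, SYS_APIAKEYLO_EL1);
	 *   write_sysreg_s(keys->apia.hi, SYS_APIAKEYHI_EL1);
	 */
}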
3 changes: 2 additions & 1 deletion arch/arm64/include/asm/processor.h
@@ -146,7 +146,8 @@ struct thread_struct {
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
struct ptrauth_keys keys_user;
struct ptrauth_keys_user keys_user;
struct ptrauth_keys_kernel keys_kernel;
#endif
};

12 changes: 12 additions & 0 deletions arch/arm64/include/asm/smp.h
@@ -23,13 +23,22 @@
#define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT)
#define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT)

/* Possible options for __cpu_setup */
/* Option to set up the primary (boot) CPU */
#define ARM64_CPU_BOOT_PRIMARY (1)
/* Option to set up secondary CPUs */
#define ARM64_CPU_BOOT_SECONDARY (2)
/* Option for a CPU re-running __cpu_setup for runtime services (e.g. resume) */
#define ARM64_CPU_RUNTIME (3)

#ifndef __ASSEMBLY__

#include <asm/percpu.h>

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
#include <asm/pointer_auth.h>

DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

@@ -87,6 +96,9 @@ asmlinkage void secondary_start_kernel(void);
struct secondary_data {
void *stack;
struct task_struct *task;
#ifdef CONFIG_ARM64_PTR_AUTH
struct ptrauth_keys_kernel ptrauth_key;
#endif
long status;
};
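The new ptrauth_key field lets a secondary CPU program its kernel APIA key from secondary_data before it can run C code. The smp.c side is not in this excerpt; a hedged sketch of how the boot path might seed it from the secondary's idle task:

/* sketch: seed the secondary's kernel key from its idle task (smp.c side) */
#ifdef CONFIG_ARM64_PTR_AUTH
	secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo;
	secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi;
#endif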
