arm64: errata: Assume that unknown CPUs _are_ vulnerable to Spectre BHB
The code for detecting CPUs that are vulnerable to Spectre BHB was
based on a hardcoded list of CPU IDs that were known to be affected.
Unfortunately, the list mostly contained only the IDs of standard ARM
cores. The IDs for many cores that are minor variants of the standard
ARM cores (like many Qualcomm Kryo CPUs) weren't listed. This led the
code to assume that those variants were not affected.

Flip the code on its head and instead assume that a core is vulnerable
if it doesn't have CSV2_3 and isn't recognized as safe. This involves
creating a "Spectre BHB safe" list.
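
In outline, the reworked check (condensed here from the diff below,
using the helper names the patch introduces) defaults to treating an
unrecognized core as vulnerable:

	bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
				     int scope)
	{
		/* CSV2_3 means the hardware itself is not affected. */
		if (supports_csv2p3(scope))
			return false;

		/* Cores on the new allow-list are known safe. */
		if (is_spectre_bhb_safe(scope))
			return false;

		/*
		 * Anything else is assumed vulnerable; record the worst-case
		 * loop count unless clearbhb will be used instead.
		 */
		if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
			max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());

		return true;
	}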

As of right now, the only CPU IDs added to the "Spectre BHB safe" list
are ARM Cortex A35, A53, A55, A510, and A520. This list was created by
looking for cores that weren't listed in ARM's list [1], as per review
feedback on v2 of this patch [2]. Additionally, Brahma B53 is added as
per mailing list feedback [3].
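
Marking another core as safe later only requires extending the new
allow-list; a minimal sketch of such an addition (MIDR_MY_SAFE_CORE is
a hypothetical placeholder, not a real cputype.h definition):

	static const struct midr_range spectre_bhb_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		/* Hypothetical future addition for a newly-audited core: */
		MIDR_ALL_VERSIONS(MIDR_MY_SAFE_CORE),
		{},
	};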

NOTE: this patch will not actually _mitigate_ anything by itself; it
will simply cause affected cores to report themselves as vulnerable. If
any cores in the system are reported as vulnerable but not mitigated,
then the whole system will be reported as vulnerable, though the system
will still attempt to mitigate with the information it has about the
known cores.

[1] https://developer.arm.com/Arm%20Security%20Center/Spectre-BHB
[2] https://lore.kernel.org/r/20241219175128.GA25477@willie-the-truck
[3] https://lore.kernel.org/r/18dbd7d1-a46c-4112-a425-320c99f67a8d@broadcom.com

Fixes: 558c303 ("arm64: Mitigate spectre style branch history side channels")
Cc: stable@vger.kernel.org
Reviewed-by: Julius Werner <jwerner@chromium.org>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Link: https://lore.kernel.org/r/20250107120555.v4.2.I2040fa004dafe196243f67ebcc647cbedbb516e6@changeid
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Douglas Anderson authored and Catalin Marinas committed Mar 14, 2025
1 parent ed1ce84 commit e403e85
Showing 2 changed files with 102 additions and 102 deletions.
1 change: 0 additions & 1 deletion arch/arm64/include/asm/spectre.h
@@ -97,7 +97,6 @@ enum mitigation_state arm64_get_meltdown_state(void);

enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);

203 changes: 102 additions & 101 deletions arch/arm64/kernel/proton-pack.c
@@ -845,53 +845,70 @@ static unsigned long system_bhb_mitigations;
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
* SCOPE_SYSTEM call will give the right answer.
*/
u8 spectre_bhb_loop_affected(int scope)
static bool is_spectre_bhb_safe(int scope)
{
static const struct midr_range spectre_bhb_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
{},
};
static bool all_safe = true;

if (scope != SCOPE_LOCAL_CPU)
return all_safe;

if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
return true;

all_safe = false;

return false;
}

static u8 spectre_bhb_loop_affected(void)
{
u8 k = 0;
static u8 max_bhb_k;

if (scope == SCOPE_LOCAL_CPU) {
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
{},
};
static const struct midr_range spectre_bhb_k24_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
{},
};
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
{},
};

if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
k = 11;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;

max_bhb_k = max(max_bhb_k, k);
} else {
k = max_bhb_k;
}

static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
{},
};
static const struct midr_range spectre_bhb_k24_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
{},
};
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
{},
};

if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
k = 11;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;

return k;
}
@@ -917,29 +934,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
}
}

static bool is_spectre_bhb_fw_affected(int scope)
static bool has_spectre_bhb_fw_mitigation(void)
{
static bool system_affected;
enum mitigation_state fw_state;
bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
{},
};
bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
spectre_bhb_firmware_mitigated_list);

if (scope != SCOPE_LOCAL_CPU)
return system_affected;

fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
system_affected = true;
return true;
}

return false;
return has_smccc && fw_state == SPECTRE_MITIGATED;
}

static bool supports_ecbhb(int scope)
@@ -955,6 +956,8 @@ static bool supports_ecbhb(int scope)
ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}

static u8 max_bhb_k;

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
int scope)
{
@@ -963,16 +966,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
if (supports_csv2p3(scope))
return false;

if (supports_clearbhb(scope))
return true;

if (spectre_bhb_loop_affected(scope))
return true;
if (is_spectre_bhb_safe(scope))
return false;

if (is_spectre_bhb_fw_affected(scope))
return true;
/*
* At this point the core isn't known to be "safe" so we're going to
* assume it's vulnerable. We still need to update `max_bhb_k`, but
* only if we aren't mitigating with clearbhb.
*/
if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());

return false;
return true;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
@@ -1003,7 +1008,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cpu_cb;
enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
enum mitigation_state state = SPECTRE_VULNERABLE;
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1029,7 +1034,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
state = SPECTRE_MITIGATED;
set_bit(BHB_INSN, &system_bhb_mitigations);
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
} else if (spectre_bhb_loop_affected()) {
/*
* Ensure KVM uses the indirect vector which will have the
* branchy-loop added. A57/A72-r0 will already have selected
@@ -1042,32 +1047,29 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
state = SPECTRE_MITIGATED;
set_bit(BHB_LOOP, &system_bhb_mitigations);
} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (fw_state == SPECTRE_MITIGATED) {
/*
* Ensure KVM uses one of the spectre bp_hardening
* vectors. The indirect vector doesn't include the EL3
* call, so needs upgrading to
* HYP_VECTOR_SPECTRE_INDIRECT.
*/
if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
data->slot += 1;

this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

/*
* The WA3 call in the vectors supersedes the WA1 call
* made during context-switch. Uninstall any firmware
* bp_hardening callback.
*/
cpu_cb = spectre_v2_get_sw_mitigation_cb();
if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
__this_cpu_write(bp_hardening_data.fn, NULL);

state = SPECTRE_MITIGATED;
set_bit(BHB_FW, &system_bhb_mitigations);
}
} else if (has_spectre_bhb_fw_mitigation()) {
/*
* Ensure KVM uses one of the spectre bp_hardening
* vectors. The indirect vector doesn't include the EL3
* call, so needs upgrading to
* HYP_VECTOR_SPECTRE_INDIRECT.
*/
if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
data->slot += 1;

this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

/*
* The WA3 call in the vectors supersedes the WA1 call
* made during context-switch. Uninstall any firmware
* bp_hardening callback.
*/
cpu_cb = spectre_v2_get_sw_mitigation_cb();
if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
__this_cpu_write(bp_hardening_data.fn, NULL);

state = SPECTRE_MITIGATED;
set_bit(BHB_FW, &system_bhb_mitigations);
}

update_mitigation_state(&spectre_bhb_state, state);
@@ -1101,7 +1103,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
{
u8 rd;
u32 insn;
u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

BUG_ON(nr_inst != 1); /* MOV -> MOV */

@@ -1110,7 +1111,7 @@

insn = le32_to_cpu(*origptr);
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_ZERO);
*updptr++ = cpu_to_le32(insn);
