/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cachetype.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/vectors.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 midr = read_cpuid_id();

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &entry->midr_range);
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = CTR_CACHE_MINLINE_MASK;

	/* Skip matching the min line sizes for cache type check */
	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
		mask ^= arm64_ftr_reg_ctrel0.strict_mask;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & mask) !=
	       (arm64_ftr_reg_ctrel0.sys_val & mask);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

static bool __hardenbp_enab;
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];
extern char __smccc_workaround_3_smc_start[];
extern char __smccc_workaround_3_smc_end[];
extern char __spectre_bhb_loop_k8_start[];
extern char __spectre_bhb_loop_k8_end[];
extern char __spectre_bhb_loop_k24_start[];
extern char __spectre_bhb_loop_k24_end[];
extern char __spectre_bhb_loop_k32_start[];
extern char __spectre_bhb_loop_k32_end[];
extern char __spectre_bhb_clearbhb_start[];
extern char __spectre_bhb_clearbhb_end[];

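/*
 * Copy the template sequence into every 0x80-byte vector entry of the given
 * 2K hyp-vectors slot, then make the copy visible to the instruction stream.
 */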
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = __bp_harden_hyp_vecs_start + slot * SZ_2K;
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static DEFINE_SPINLOCK(bp_lock);
static int last_slot = -1;

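/*
 * Install 'fn' as this CPU's hardening callback and, for KVM, point the CPU
 * at a hyp-vectors slot built from the matching template.  Slots are shared
 * between CPUs that use the same callback; bp_lock serialises allocation.
 */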
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	if (fn != __this_cpu_read(bp_hardening_data.fn)) {
		__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
		__this_cpu_write(bp_hardening_data.fn, fn);
		__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
		__hardenbp_enab = true;
	}
	spin_unlock(&bp_lock);
}

#else
#define __smccc_workaround_1_smc_start NULL
#define __smccc_workaround_1_smc_end NULL
#define __smccc_workaround_1_hvc_start NULL
#define __smccc_workaround_1_hvc_end NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
	__hardenbp_enab = true;
}
#endif	/* CONFIG_KVM */

static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

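/*
 * Probe the firmware via the PSCI conduit for ARM_SMCCC_ARCH_WORKAROUND_1
 * and, if it is implemented, install the matching SMC/HVC callback and
 * hyp-vector template on this CPU.
 */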
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_hvc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_hvc_start;
		smccc_end = __smccc_workaround_1_hvc_end;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return;
	}

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

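/*
 * Alternatives callback: patch the single placeholder instruction with an
 * HVC or SMC, matching whichever conduit PSCI uses, or leave it untouched
 * when there is no conduit.
 */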
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be modified.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

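/* Ask firmware to turn the SSBD mitigation on or off for this CPU. */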
void arm64_set_ssbd_mitigation(bool state)
{
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

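/*
 * Probe firmware for ARM_SMCCC_ARCH_WORKAROUND_2 support and apply the
 * "ssbd=" command-line policy, recording whether this CPU requires the
 * mitigation.
 */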
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;
	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;
	case SMCCC_RET_SUCCESS:
		required = true;
		break;
	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;
	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;
	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

#endif

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
	{
		.desc = "Mismatched cache type",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
		.cpu_enable = enable_smccc_arch_workaround_1,
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.capability = ARM64_SSBD,
		.matches = has_ssbd_mitigation,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1188873",
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
	{
		.desc = "Spectre-BHB",
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.capability = ARM64_SPECTRE_BHB,
		.matches = is_spectre_bhb_affected,
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
		.cpu_enable = spectre_bhb_enable_mitigation,
#endif
	},
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
	{
	}
};

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void __maybe_unused update_mitigation_state(enum mitigation_state *oldp,
						   enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}

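/*
 * Ask firmware whether ARM_SMCCC_ARCH_WORKAROUND_3 is implemented for this
 * CPU, translating the SMCCC return value into a mitigation_state.
 */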
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return SPECTRE_VULNERABLE;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

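/*
 * A CPU needs (or already has) the firmware mitigation if it is on the
 * firmware-mitigated list, or if firmware reports WORKAROUND_3 support over
 * SMCCC 1.1 or later.  The result is latched so that SCOPE_SYSTEM callers
 * see whether any CPU so far was affected.
 */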
static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool __maybe_unused supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

#ifdef CONFIG_KVM
static const char *kvm_bhb_get_vecs_end(const char *start)
{
	if (start == __smccc_workaround_3_smc_start)
		return __smccc_workaround_3_smc_end;
	else if (start == __spectre_bhb_loop_k8_start)
		return __spectre_bhb_loop_k8_end;
	else if (start == __spectre_bhb_loop_k24_start)
		return __spectre_bhb_loop_k24_end;
	else if (start == __spectre_bhb_loop_k32_start)
		return __spectre_bhb_loop_k32_end;
	else if (start == __spectre_bhb_clearbhb_start)
		return __spectre_bhb_clearbhb_end;

	return NULL;
}

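/*
 * Like __install_bp_hardening_cb(), but slots are keyed on the vector
 * template rather than the callback: reuse any slot already built from
 * 'hyp_vecs_start', otherwise allocate and fill a new one under bp_lock.
 */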
static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
	int cpu, slot = -1;
	const char *hyp_vecs_end;

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return;

	hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
	if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
		return;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) {
		__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
		__this_cpu_write(bp_hardening_data.template_start,
				 hyp_vecs_start);
	}
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_3_smc_start NULL
#define __spectre_bhb_loop_k8_start NULL
#define __spectre_bhb_loop_k24_start NULL
#define __spectre_bhb_loop_k32_start NULL
#define __spectre_bhb_clearbhb_start NULL

static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
#endif /* CONFIG_KVM */

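/*
 * A CPU is treated as Spectre-v2 safe here if it is not on the list of
 * cores that need the firmware ARCH_WORKAROUND_1 branch-predictor
 * hardening call.
 */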
static bool is_spectrev2_safe(void)
{
	return !is_midr_in_range_list(read_cpuid_id(),
				      arm64_bp_harden_smccc_cpus);
}

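/*
 * Pick the strongest available Spectre-BHB mitigation for this CPU, in
 * order of preference: nothing needed (ECBHB), the ClearBHB instruction,
 * the CPU-specific branchy loop, then the firmware WORKAROUND_3 call.
 */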
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (!is_spectrev2_safe() && !__hardenbp_enab) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);

		state = SPECTRE_MITIGATED;
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
		case 8:
			/*
			 * A57/A72-r0 will already have selected the
			 * spectre-indirect vector, which is sufficient
			 * for BHB too.
			 */
			if (!__this_cpu_read(bp_hardening_data.fn))
				kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
			break;
		case 24:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
			break;
		case 32:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);

		state = SPECTRE_MITIGATED;
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * With WA3 in the vectors, the WA1 calls can be
			 * removed.
			 */
			__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to correct the immediate */
void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					__le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */