GNU Linux-libre 4.9.337-gnu1
arch/arm64/kernel/cpu_errata.c
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cachetype.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/vectors.h>

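/*
 * Match if this CPU's MIDR falls inside the capability's MIDR range (or
 * range list, below). These run with SCOPE_LOCAL_CPU, so they must be
 * called on the CPU being checked, with preemption disabled.
 */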
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 midr = read_cpuid_id();

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &entry->midr_range);
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

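/*
 * A CPU is mismatched if its CTR_EL0 differs from the system-wide sanitised
 * value in the relevant fields. For ARM64_MISMATCHED_CACHE_LINE_SIZE only
 * the minimum line size fields are compared; for ARM64_MISMATCHED_CACHE_TYPE
 * the minimum line sizes are dropped from the strictly-checked mask instead.
 */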
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = CTR_CACHE_MINLINE_MASK;

        /* Skip matching the min line sizes for cache type check */
        if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
                mask ^= arm64_ftr_reg_ctrel0.strict_mask;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return (read_cpuid_cachetype() & mask) !=
               (arm64_ftr_reg_ctrel0.sys_val & mask);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

static bool __hardenbp_enab;
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];
extern char __smccc_workaround_3_smc_start[];
extern char __smccc_workaround_3_smc_end[];
extern char __spectre_bhb_loop_k8_start[];
extern char __spectre_bhb_loop_k8_end[];
extern char __spectre_bhb_loop_k24_start[];
extern char __spectre_bhb_loop_k24_end[];
extern char __spectre_bhb_loop_k32_start[];
extern char __spectre_bhb_loop_k32_end[];
extern char __spectre_bhb_clearbhb_start[];
extern char __spectre_bhb_clearbhb_end[];

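/*
 * Copy the hardening template into every 0x80-byte vector entry of the
 * selected 2K hyp vectors slot, then flush the instruction cache so the
 * new vectors are visible to the CPU.
 */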
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = __bp_harden_hyp_vecs_start + slot * SZ_2K;
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static DEFINE_SPINLOCK(bp_lock);
static int last_slot = -1;

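/*
 * Install the branch predictor hardening callback for this CPU. Hyp vector
 * slots are shared: reuse the slot of any CPU that already installed the
 * same callback, otherwise allocate the next free slot and populate it.
 */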
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        int cpu, slot = -1;

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        if (fn != __this_cpu_read(bp_hardening_data.fn)) {
                __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
                __this_cpu_write(bp_hardening_data.fn, fn);
                __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
                __hardenbp_enab = true;
        }
        spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start          NULL
#define __smccc_workaround_1_smc_end            NULL
#define __smccc_workaround_1_hvc_start          NULL
#define __smccc_workaround_1_hvc_end            NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
        __hardenbp_enab = true;
}
#endif  /* CONFIG_KVM */

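/*
 * Install the callback only on affected CPUs, and skip CPUs that advertise
 * CSV2 in ID_AA64PFR0_EL1 (their branch predictors are not susceptible).
 */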
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
                                    bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        u64 pfr0;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
                return;

        __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return;
                cb = call_hvc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_hvc_start;
                smccc_end = __smccc_workaround_1_hvc_end;
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return;
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
                smccc_end = __smccc_workaround_1_smc_end;
                break;

        default:
                return;
        }

        install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

        return;
}
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR */

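/*
 * Alternative-instruction callback: patch the placeholder with an HVC or
 * SMC depending on the PSCI conduit. If no conduit is set up, the original
 * instruction is left untouched.
 */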
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
                                       __le32 *origptr, __le32 *updptr,
                                       int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1);

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case PSCI_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
        const char      *str;
        int             state;
} ssbd_options[] = {
        { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
        { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
        { "kernel",     ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
        int i;

        if (!buf || !buf[0])
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
                int len = strlen(ssbd_options[i].str);

                if (strncmp(buf, ssbd_options[i].str, len))
                        continue;

                ssbd_state = ssbd_options[i].state;
                return 0;
        }

        return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                                      int nr_inst)
{
        BUG_ON(nr_inst != 1);
        /*
         * Only allow mitigation on EL1 entry/exit and guest
         * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
         * be flipped.
         */
        if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

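/*
 * Ask firmware to enable or disable the Speculative Store Bypass workaround
 * (ARCH_WORKAROUND_2) for this CPU, using whichever conduit is in use.
 */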
void arm64_set_ssbd_mitigation(bool state)
{
        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }
}

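/*
 * Probe firmware for ARCH_WORKAROUND_2 support and combine the result with
 * the "ssbd=" command-line policy. Returns true when the mitigation is
 * required on this CPU.
 */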
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        struct arm_smccc_res res;
        bool required = true;
        s32 val;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;
        }

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        default:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;
        }

        val = (s32)res.a0;

        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;

        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;

        case SMCCC_RET_SUCCESS:
                required = true;
                break;

        case 1: /* Mitigation not required on this CPU */
                required = false;
                break;

        default:
                WARN_ON(1);
                return false;
        }

        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                pr_info_once("%s disabled from command-line\n", entry->desc);
                arm64_set_ssbd_mitigation(false);
                required = false;
                break;

        case ARM64_SSBD_KERNEL:
                if (required) {
                        __this_cpu_write(arm64_ssbd_callback_required, 1);
                        arm64_set_ssbd_mitigation(true);
                }
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                pr_info_once("%s forced from command-line\n", entry->desc);
                arm64_set_ssbd_mitigation(true);
                required = true;
                break;

        default:
                WARN_ON(1);
                break;
        }

        return required;
}
#endif  /* CONFIG_ARM64_SSBD */

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)                                    \
        .matches = is_affected_midr_range,                              \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)            \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
        CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)                               \
        .matches = is_affected_midr_range_list,                 \
        .midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)      \
        ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)        \
        ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)                         \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of MIDR ranges, all sharing the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        {},
};

#endif

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
        MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        {},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#if     defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        {
        /* Cortex-A53 r0p[012] */
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        {
        /* Cortex-A53 r0p[01] */
                .desc = "ARM errata 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
        /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
        /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE(MIDR_THUNDERX,
                                  0, 0,
                                  1, 1),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        },
#endif
        {
                .desc = "Mismatched cache line size",
                .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
        {
                .desc = "Mismatched cache type",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
                .cpu_enable = enable_smccc_arch_workaround_1,
        },
#endif
#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypass Disable",
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .capability = ARM64_SSBD,
                .matches = has_ssbd_mitigation,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
        {
                /* Cortex-A76 r0p0 to r2p0 */
                .desc = "ARM erratum 1188873",
                .capability = ARM64_WORKAROUND_1188873,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
        },
#endif
        {
                .desc = "Spectre-BHB",
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .capability = ARM64_SPECTRE_BHB,
                .matches = is_spectre_bhb_affected,
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
                .cpu_enable = spectre_bhb_enable_mitigation,
#endif
        },
#ifdef CONFIG_ARM64_ERRATUM_1742098
        {
                .desc = "ARM erratum 1742098",
                .capability = ARM64_WORKAROUND_1742098,
                CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
        },
#endif
        {
        }
};

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void __maybe_unused update_mitigation_state(enum mitigation_state *oldp,
                                                   enum mitigation_state new)
{
        enum mitigation_state state;

        do {
                state = READ_ONCE(*oldp);
                if (new <= state)
                        break;
        } while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop run a CPU-specific number of times, and
 *   listed in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
        return spectre_bhb_state;
}

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
        u8 k = 0;
        static u8 max_bhb_k;

        if (scope == SCOPE_LOCAL_CPU) {
                static const struct midr_range spectre_bhb_k32_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
                        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
                        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
                        {},
                };
                static const struct midr_range spectre_bhb_k24_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
                        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
                        {},
                };
                static const struct midr_range spectre_bhb_k8_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
                        {},
                };

                if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
                        k = 32;
                else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
                        k = 24;
                else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
                        k =  8;

                max_bhb_k = max(max_bhb_k, k);
        } else {
                k = max_bhb_k;
        }

        return k;
}

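/*
 * Ask firmware whether ARCH_WORKAROUND_3 is implemented for this CPU, and
 * translate the SMCCC return value into a mitigation state.
 */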
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
        int ret;
        struct arm_smccc_res res;

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return SPECTRE_VULNERABLE;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
                break;

        default:
                return SPECTRE_VULNERABLE;
        }

        ret = res.a0;
        switch (ret) {
        case SMCCC_RET_SUCCESS:
                return SPECTRE_MITIGATED;
        case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
                return SPECTRE_UNAFFECTED;
        default:
        case SMCCC_RET_NOT_SUPPORTED:
                return SPECTRE_VULNERABLE;
        }
}

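/*
 * A CPU is affected via the firmware route if it is on the known list, or
 * if firmware advertises ARCH_WORKAROUND_3 (which implies the mitigation is
 * needed). The SCOPE_SYSTEM result is sticky: once any CPU is affected, the
 * whole system is reported as affected.
 */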
static bool is_spectre_bhb_fw_affected(int scope)
{
        static bool system_affected;
        enum mitigation_state fw_state;
        bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
        static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
                {},
        };
        bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
                                         spectre_bhb_firmware_mitigated_list);

        if (scope != SCOPE_LOCAL_CPU)
                return system_affected;

        fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
        if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
                system_affected = true;
                return true;
        }

        return false;
}

static bool __maybe_unused supports_ecbhb(int scope)
{
        u64 mmfr1;

        if (scope == SCOPE_LOCAL_CPU)
                mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
        else
                mmfr1 = read_system_reg(SYS_ID_AA64MMFR1_EL1);

        return cpuid_feature_extract_unsigned_field(mmfr1,
                                                    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
                             int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (supports_csv2p3(scope))
                return false;

        if (supports_clearbhb(scope))
                return true;

        if (spectre_bhb_loop_affected(scope))
                return true;

        if (is_spectre_bhb_fw_affected(scope))
                return true;

        return false;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
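/*
 * Point VBAR_EL1 at the chosen mitigation vectors. When KPTI is enabled the
 * vectors are switched on the exit-to-user path instead, so only the per-cpu
 * pointer is updated here.
 */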
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
        const char *v = arm64_get_bp_hardening_vector(slot);

        if (slot < 0)
                return;

        __this_cpu_write(this_cpu_vector, v);

        /*
         * When KPTI is in use, the vectors are switched when exiting to
         * user-space.
         */
        if (arm64_kernel_unmapped_at_el0())
                return;

        write_sysreg(v, vbar_el1);
        isb();
}

#ifdef CONFIG_KVM
static const char *kvm_bhb_get_vecs_end(const char *start)
{
        if (start == __smccc_workaround_3_smc_start)
                return __smccc_workaround_3_smc_end;
        else if (start == __spectre_bhb_loop_k8_start)
                return __spectre_bhb_loop_k8_end;
        else if (start == __spectre_bhb_loop_k24_start)
                return __spectre_bhb_loop_k24_end;
        else if (start == __spectre_bhb_loop_k32_start)
                return __spectre_bhb_loop_k32_end;
        else if (start == __spectre_bhb_clearbhb_start)
                return __spectre_bhb_clearbhb_end;

        return NULL;
}

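/*
 * Install the Spectre-BHB template into a hyp vectors slot, reusing the slot
 * of any CPU that already uses the same template.
 */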
static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
        int cpu, slot = -1;
        const char *hyp_vecs_end;

        if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
                return;

        hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
        if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
                return;

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) {
                __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
                __this_cpu_write(bp_hardening_data.template_start,
                                 hyp_vecs_start);
        }
        spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_3_smc_start NULL
#define __spectre_bhb_loop_k8_start NULL
#define __spectre_bhb_loop_k24_start NULL
#define __spectre_bhb_loop_k32_start NULL
#define __spectre_bhb_clearbhb_start NULL

static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
#endif /* CONFIG_KVM */

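/*
 * A CPU is Spectre-v2 safe for our purposes if it is not on the list of
 * CPUs that need the firmware call to harden the branch predictor.
 */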
static bool is_spectrev2_safe(void)
{
        return !is_midr_in_range_list(read_cpuid_id(),
                                      arm64_bp_harden_smccc_cpus);
}

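/*
 * Pick the strongest available Spectre-BHB mitigation for this CPU: ECBHB
 * needs nothing in the vectors, then the ClearBHB instruction, then the
 * branchy loop, and finally the ARCH_WORKAROUND_3 firmware call. If
 * Spectre-v2 is unmitigated on an affected CPU, mitigating BHB alone is
 * pointless and nothing is enabled.
 */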
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
        enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;

        if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
                return;

        if (!is_spectrev2_safe() && !__hardenbp_enab) {
                /* No point mitigating Spectre-BHB alone. */
        } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
                pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
        } else if (cpu_mitigations_off()) {
                pr_info_once("spectre-bhb mitigation disabled by command line option\n");
        } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
                state = SPECTRE_MITIGATED;
        } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
                kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
                this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);

                state = SPECTRE_MITIGATED;
        } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
                switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
                case 8:
                        /*
                         * A57/A72-r0 will already have selected the
                         * spectre-indirect vector, which is sufficient
                         * for BHB too.
                         */
                        if (!__this_cpu_read(bp_hardening_data.fn))
                                kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
                        break;
                case 24:
                        kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
                        break;
                case 32:
                        kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
                        break;
                default:
                        WARN_ON_ONCE(1);
                }
                this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);

                state = SPECTRE_MITIGATED;
        } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
                fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
                if (fw_state == SPECTRE_MITIGATED) {
                        kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
                        this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

                        /*
                         * With WA3 in the vectors, the WA1 calls can be
                         * removed.
                         */
                        __this_cpu_write(bp_hardening_data.fn, NULL);

                        state = SPECTRE_MITIGATED;
                }
        }

        update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to correct the immediate */
void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
                                        __le32 *origptr, __le32 *updptr, int nr_inst)
{
        u8 rd;
        u32 insn;
        u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

        BUG_ON(nr_inst != 1); /* MOV -> MOV */

        if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
                return;

        insn = le32_to_cpu(*origptr);
        rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
        insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_ZERO);
        *updptr++ = cpu_to_le32(insn);
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */