GNU Linux-libre 4.19.264-gnu1
arch/arm/mm/proc-v7-bugs.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/arm-smccc.h>
#include <linux/kernel.h>
#include <linux/psci.h>
#include <linux/smp.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/proc-fns.h>
#include <asm/spectre.h>
#include <asm/system_misc.h>

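/*
 * Ask the firmware, via the SMCCC 1.1 ARCH_FEATURES query, whether it
 * implements ARCH_WORKAROUND_1 for this CPU.  Without CONFIG_ARM_PSCI
 * there is no conduit to the firmware, so report the CPU as vulnerable.
 */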
#ifdef CONFIG_ARM_PSCI
#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED	1
static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
{
        struct arm_smccc_res res;

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_1, &res);

        switch ((int)res.a0) {
        case SMCCC_RET_SUCCESS:
                return SPECTRE_MITIGATED;

        case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
                return SPECTRE_UNAFFECTED;

        default:
                return SPECTRE_VULNERABLE;
        }
}
#else
static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
{
        return SPECTRE_VULNERABLE;
}
#endif

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);

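/*
 * Hardened variants of cpu_v7_switch_mm(), implemented in assembly: the
 * BPIALL/ICIALLU versions invalidate the branch predictor locally, while
 * the SMC/HVC versions trap to firmware or the hypervisor with the
 * ARM_SMCCC_ARCH_WORKAROUND_1 call before switching address spaces.
 */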
extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

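/*
 * Per-CPU branch predictor hardening callbacks: invalidate the predictor
 * directly (BPIALL), invalidate the whole I-cache instead (ICIALLU, for
 * cores where BPIALL is not effective), or call into the hypervisor or
 * secure firmware via SMCCC ARCH_WORKAROUND_1.
 */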
static void harden_branch_predictor_bpiall(void)
{
        write_sysreg(0, BPIALL);
}

static void harden_branch_predictor_iciallu(void)
{
        write_sysreg(0, ICIALLU);
}

static void __maybe_unused call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void __maybe_unused call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

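/*
 * Record the chosen hardening method for the calling CPU.  For the
 * hypervisor/firmware methods the context-switch routine is also
 * replaced so that the workaround is applied when switching address
 * spaces.
 */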
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
        const char *spectre_v2_method = NULL;
        int cpu = smp_processor_id();

        if (per_cpu(harden_branch_predictor_fn, cpu))
                return SPECTRE_MITIGATED;

        switch (method) {
        case SPECTRE_V2_METHOD_BPIALL:
                per_cpu(harden_branch_predictor_fn, cpu) =
                        harden_branch_predictor_bpiall;
                spectre_v2_method = "BPIALL";
                break;

        case SPECTRE_V2_METHOD_ICIALLU:
                per_cpu(harden_branch_predictor_fn, cpu) =
                        harden_branch_predictor_iciallu;
                spectre_v2_method = "ICIALLU";
                break;

        case SPECTRE_V2_METHOD_HVC:
                per_cpu(harden_branch_predictor_fn, cpu) =
                        call_hvc_arch_workaround_1;
                cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
                spectre_v2_method = "hypervisor";
                break;

        case SPECTRE_V2_METHOD_SMC:
                per_cpu(harden_branch_predictor_fn, cpu) =
                        call_smc_arch_workaround_1;
                cpu_do_switch_mm = cpu_v7_smc_switch_mm;
                spectre_v2_method = "firmware";
                break;
        }

        if (spectre_v2_method)
                pr_info("CPU%u: Spectre v2: using %s workaround\n",
                        smp_processor_id(), spectre_v2_method);

        return SPECTRE_MITIGATED;
}
#else
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
        pr_info_once("Spectre V2: workarounds disabled by configuration\n");

        return SPECTRE_VULNERABLE;
}
#endif

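/*
 * Select a Spectre v2 mitigation based on the CPU part number: local
 * BPIALL or ICIALLU for cores that can invalidate their own predictor,
 * and the firmware ARCH_WORKAROUND_1 call for Cortex-A57, Cortex-A72
 * and unrecognised non-ARM implementations.
 */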
static void cpu_v7_spectre_v2_init(void)
{
        unsigned int state, method = 0;

        switch (read_cpuid_part()) {
        case ARM_CPU_PART_CORTEX_A8:
        case ARM_CPU_PART_CORTEX_A9:
        case ARM_CPU_PART_CORTEX_A12:
        case ARM_CPU_PART_CORTEX_A17:
        case ARM_CPU_PART_CORTEX_A73:
        case ARM_CPU_PART_CORTEX_A75:
                state = SPECTRE_MITIGATED;
                method = SPECTRE_V2_METHOD_BPIALL;
                break;

        case ARM_CPU_PART_CORTEX_A15:
        case ARM_CPU_PART_BRAHMA_B15:
                state = SPECTRE_MITIGATED;
                method = SPECTRE_V2_METHOD_ICIALLU;
                break;

        case ARM_CPU_PART_BRAHMA_B53:
                /* Requires no workaround */
                state = SPECTRE_UNAFFECTED;
                break;

        default:
                /* Other ARM CPUs require no workaround */
                if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
                        state = SPECTRE_UNAFFECTED;
                        break;
                }
                /* fallthrough */
        /* Cortex A57/A72 require firmware workaround */
        case ARM_CPU_PART_CORTEX_A57:
        case ARM_CPU_PART_CORTEX_A72: {
                struct arm_smccc_res res;

                state = spectre_v2_get_cpu_fw_mitigation_state();
                if (state != SPECTRE_MITIGATED)
                        break;

                if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                        break;

                switch (psci_ops.conduit) {
                case PSCI_CONDUIT_HVC:
                        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                        if ((int)res.a0 != 0)
                                break;
                        method = SPECTRE_V2_METHOD_HVC;
                        break;

                case PSCI_CONDUIT_SMC:
                        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                        if ((int)res.a0 != 0)
                                break;
                        method = SPECTRE_V2_METHOD_SMC;
                        break;

                default:
                        state = SPECTRE_VULNERABLE;
                        break;
                }
        }
        }

        if (state == SPECTRE_MITIGATED)
                state = spectre_v2_install_workaround(method);

        spectre_v2_update_state(state, method);
}

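/*
 * Spectre BHB (branch history injection): the mitigation is applied by
 * rewriting the exception vectors, either to run a loop that overwrites
 * the branch history on kernel entry or to invalidate the branch
 * predictor outright.
 */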
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
static int spectre_bhb_method;

static const char *spectre_bhb_method_name(int method)
{
        switch (method) {
        case SPECTRE_V2_METHOD_LOOP8:
                return "loop";

        case SPECTRE_V2_METHOD_BPIALL:
                return "BPIALL";

        default:
                return "unknown";
        }
}

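/*
 * Install the BHB workaround system-wide on first use; later CPUs only
 * need to request a matching method.  A disagreement between CPUs leaves
 * the system vulnerable, since the rewritten vectors are shared.
 */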
static int spectre_bhb_install_workaround(int method)
{
        if (spectre_bhb_method != method) {
                if (spectre_bhb_method) {
                        pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
                               smp_processor_id());

                        return SPECTRE_VULNERABLE;
                }

                if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
                        return SPECTRE_VULNERABLE;

                spectre_bhb_method = method;

                pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
                        smp_processor_id(), spectre_bhb_method_name(method));
        }

        return SPECTRE_MITIGATED;
}
#else
static int spectre_bhb_install_workaround(int method)
{
        return SPECTRE_VULNERABLE;
}
#endif

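/*
 * Pick the BHB mitigation by CPU part: the eight-iteration loop
 * workaround for Cortex-A15/A57/A72 and Brahma-B15, BPIALL for
 * Cortex-A73/A75, and nothing for parts that are not affected.
 */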
static void cpu_v7_spectre_bhb_init(void)
{
        unsigned int state, method = 0;

        switch (read_cpuid_part()) {
        case ARM_CPU_PART_CORTEX_A15:
        case ARM_CPU_PART_BRAHMA_B15:
        case ARM_CPU_PART_CORTEX_A57:
        case ARM_CPU_PART_CORTEX_A72:
                state = SPECTRE_MITIGATED;
                method = SPECTRE_V2_METHOD_LOOP8;
                break;

        case ARM_CPU_PART_CORTEX_A73:
        case ARM_CPU_PART_CORTEX_A75:
                state = SPECTRE_MITIGATED;
                method = SPECTRE_V2_METHOD_BPIALL;
                break;

        default:
                state = SPECTRE_UNAFFECTED;
                break;
        }

        if (state == SPECTRE_MITIGATED)
                state = spectre_bhb_install_workaround(method);

        spectre_v2_update_state(state, method);
}

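/*
 * Read the Auxiliary Control Register and check that the given bits are
 * set.  On Cortex-A8 and Cortex-A15 the IBE bit must be set by firmware
 * before BPIALL has any effect, so warn once per CPU if it is missing.
 */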
static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
                                                  u32 mask, const char *msg)
{
        u32 aux_cr;

        asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));

        if ((aux_cr & mask) != mask) {
                if (!*warned)
                        pr_err("CPU%u: %s", smp_processor_id(), msg);
                *warned = true;
                return false;
        }
        return true;
}

static DEFINE_PER_CPU(bool, spectre_warned);

static bool check_spectre_auxcr(bool *warned, u32 bit)
{
        return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
                cpu_v7_check_auxcr_set(warned, bit,
                                       "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
}

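/*
 * Per-CPU bug-workaround hooks, referenced from the processor function
 * tables in proc-v7.S and run on each CPU as it comes up.  The
 * Cortex-A8 and Cortex-A15 variants only enable the Spectre v2
 * workaround when firmware has set the IBE bit.
 */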
void cpu_v7_ca8_ibe(void)
{
        if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
                cpu_v7_spectre_v2_init();
}

void cpu_v7_ca15_ibe(void)
{
        if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
                cpu_v7_spectre_v2_init();
        cpu_v7_spectre_bhb_init();
}

void cpu_v7_bugs_init(void)
{
        cpu_v7_spectre_v2_init();
        cpu_v7_spectre_bhb_init();
}