/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

/*
 * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
 * in the kernel and for user space to keep track of which optional features
 * are supported by the current system. So let's map feature 'x' to HWCAP_x.
 * Note that HWCAP_x constants are bit fields so we need to take the log.
 */

#define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
#define cpu_feature(x)		ilog2(HWCAP_ ## x)
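
/*
 * Illustrative example of the mapping above: on arm64, HWCAP_FP is
 * bit 0 of elf_hwcap, so cpu_feature(FP) == ilog2(HWCAP_FP) == 0.
 */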

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>

/* CPU feature register tracking */
enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
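
/*
 * Illustrative example: for a FTR_LOWER_SAFE field, the sanitised
 * system-wide value is the smallest value reported across CPUs, e.g. if
 * CPU0 reports 2 and CPU1 reports 1 in such a field, the system-wide
 * value is 1.
 */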

#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				sys_val;
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

/*
 * We use arm64_cpu_capabilities to represent system features, errata
 * workarounds (both used internally by the kernel and tracked in cpu_hwcaps)
 * and ELF HWCAPs (which are exposed to user space).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes:
 *
 * 1) Scope of Detection: The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g., checking the
 *    value of a field in a CPU ID feature register or checking the CPU
 *    model. The capability provides a callback (@matches()) to perform
 *    the check. Scope defines how the checks should be performed. There
 *    are two cases:
 *
 *    a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
 *       matches. This implies that we have to run the check on all the
 *       booting CPUs, until the system decides that the state of the
 *       capability is finalised. (See section 2 below.)
 *
 *    b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
 *       match. This implies that we run the check only once, when the
 *       system decides to finalise the state of the capability. If the
 *       capability relies on a field in one of the CPU ID feature
 *       registers, we use the sanitised value of the register from the
 *       CPU feature infrastructure to make the decision.
 *
 *    The process of detection is usually denoted by "updating" the
 *    capability state in the code.
 *
 * 2) Finalise the state: The kernel should finalise the state of a
 *    capability at some point during its execution and take necessary
 *    actions if any. Usually, this is done after all the boot-time
 *    enabled CPUs are brought up by the kernel, so that it can make a
 *    better decision based on the available set of CPUs. However, there
 *    are some special cases where the action is taken during early boot
 *    by the primary boot CPU (e.g., running the kernel at EL2 with
 *    Virtualisation Host Extensions). The kernel usually disallows any
 *    changes to the state of a capability once it finalises the
 *    capability and takes any action, as it may be impossible to execute
 *    the actions safely. A CPU brought up after a capability is
 *    "finalised" is referred to as a "late CPU" w.r.t. the capability,
 *    e.g., all secondary CPUs are treated as "late CPUs" for capabilities
 *    determined by the boot CPU.
 *
 * 3) Verification: When a CPU is brought online (e.g., by the user or by
 *    the kernel), the kernel should make sure that it is safe to use the
 *    CPU by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via:
 *
 *	secondary_start_kernel() -> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each CPU is verified against the
 *    "finalised" capabilities and if there is a conflict, the kernel
 *    takes an action, based on the severity (e.g., a CPU could be
 *    prevented from booting or cause a kernel panic). The CPU is allowed
 *    to "affect" the state of the capability if it has not been
 *    finalised already. See section 5 for more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g., SCTLR, TCR etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a
 *    later point. The actions are always initiated only after the
 *    capability is finalised. This is usually denoted by "enabling" the
 *    capability. The actions are initiated as follows:
 *
 *    a) Action is triggered on all online CPUs, after the capability is
 *       finalised, invoked within the stop_machine() context from
 *       enable_cpu_capabilities().
 *
 *    b) For any late CPU brought up after (1), the action is triggered
 *       via:
 *
 *	check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations:
 *
 *	x-----------------------------x
 *	| Type  | System | Late CPU   |
 *	|-----------------------------|
 *	|  a    |   y    |    n       |
 *	|-----------------------------|
 *	|  b    |   n    |    y       |
 *	x-----------------------------x
 *
 *    Two separate flag bits are defined to indicate whether each kind of
 *    conflict can be allowed:
 *	ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case (a) is allowed
 *	ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case (b) is allowed
 *
 *    Case (a) is not permitted for a capability that the system requires
 *    all CPUs to have in order for the capability to be enabled. This is
 *    typical for capabilities that represent enhanced functionality.
 *
 *    Case (b) is not permitted for a capability that must be enabled
 *    during boot if any CPU in the system requires it in order to run
 *    safely. This is typical for erratum workarounds that cannot be
 *    enabled after the corresponding capability is finalised.
 *
 *    In some non-typical cases, either both (a) and (b), or neither,
 *    should be permitted. This can be described by including neither
 *    or both flags in the capability's type field.
 */

/* Decide how the capability is detected: on a local CPU vs. system wide */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU	((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM	((u16)BIT(1))
#define ARM64_CPUCAP_SCOPE_MASK		\
	(ARM64_CPUCAP_SCOPE_SYSTEM |	\
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU)

#define SCOPE_SYSTEM	ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU	ARM64_CPUCAP_SCOPE_LOCAL_CPU

/*
 * Is it permitted for a late CPU to have this capability when the
 * system hasn't already enabled it?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when the system has it? */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))

/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system require it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature detected at boot time based on the system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE	\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	u16 type;
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to enable this capability for this CPU.
	 * For each successfully booted CPU, this method is called for each
	 * globally detected capability.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 min_field_value;
			bool sign;
		};
	};
};
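
/*
 * Illustrative sketch (not part of this header): a system-wide feature
 * entry in a capability table might look like the example below. The
 * has_cpuid_feature() @matches helper is assumed here, not defined in
 * this file:
 *
 *	static const struct arm64_cpu_capabilities example_cap = {
 *		.desc = "32-bit EL0 Support",
 *		.capability = ARM64_HAS_32BIT_EL0,
 *		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.matches = has_cpuid_feature,
 *		.sys_reg = SYS_ID_AA64PFR0_EL1,
 *		.field_pos = ID_AA64PFR0_EL0_SHIFT,
 *		.sign = FTR_UNSIGNED,
 *		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
 *	};
 */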

static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}

static inline bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
{
	return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
}

static inline bool
cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
{
	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
}

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;

bool this_cpu_has_cap(unsigned int cap);

static inline bool cpu_have_feature(unsigned int num)
{
	return elf_hwcap & (1UL << num);
}

/* System capability check for constant caps */
static inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

static inline bool cpus_have_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return test_bit(num, cpu_hwcaps);
}

static inline bool cpus_have_const_cap(int num)
{
	if (static_branch_likely(&arm64_const_caps_ready))
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);
}
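
/*
 * Illustrative usage sketch: callers test a capability with
 * cpus_have_const_cap(). Before arm64_const_caps_ready is set this falls
 * back to the cpu_hwcaps bitmap; afterwards it compiles down to a static
 * branch, e.g.:
 *
 *	if (cpus_have_const_cap(ARM64_HAS_32BIT_EL0))
 *		... 32-bit EL0 is supported ...
 */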

static inline void cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS) {
		pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
			num, ARM64_NCAPS);
	} else {
		__set_bit(num, cpu_hwcaps);
	}
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
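
/*
 * Worked example (illustrative): extracting the 4-bit field at bit 8 of
 * features == 0x0000000000000F00 computes
 * (features << (64 - 4 - 8)) >> (64 - 4), i.e. the field bits are first
 * shifted up to bits [63:60] and then back down, yielding 0xF.
 */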

static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return (sign) ?
		cpuid_feature_extract_signed_field(features, field) :
		cpuid_feature_extract_unsigned_field(features, field);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field(val, ftrp->shift, ftrp->sign);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

	return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}

void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);

u64 read_system_reg(u32 id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_CSV2_SHIFT);
	return csv2_val == 3;
}

static inline bool supports_clearbhb(int scope)
{
	u64 isar2;

	if (scope == SCOPE_LOCAL_CPU)
		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
	else
		isar2 = read_system_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_CLEARBHB_SHIFT);
}

static inline bool system_supports_32bit_el0(void)
{
	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}

static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
}

#define ARM64_SSBD_UNKNOWN		-1
#define ARM64_SSBD_FORCE_DISABLE	0
#define ARM64_SSBD_KERNEL		1
#define ARM64_SSBD_FORCE_ENABLE		2
#define ARM64_SSBD_MITIGATED		3

static inline int arm64_get_ssbd_state(void)
{
#ifdef CONFIG_ARM64_SSBD
	extern int ssbd_state;

	return ssbd_state;
#else
	return ARM64_SSBD_UNKNOWN;
#endif
}

#ifdef CONFIG_ARM64_SSBD
void arm64_set_ssbd_mitigation(bool state);
#else
static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif

/* Watch out, ordering is important here. */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry);

#endif /* __ASSEMBLY__ */

#endif /* __ASM_CPUFEATURE_H */