/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H
#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
/*
 * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
 * in the kernel and for user space to keep track of which optional features
 * are supported by the current system. So let's map feature 'x' to HWCAP_x.
 * Note that each HWCAP_x constant is a single-bit mask, so we take the log
 * to get its bit index.
 */

#define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
#define cpu_feature(x)		ilog2(HWCAP_ ## x)
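/*
 * Illustrative example (not part of the kernel API): since HWCAP_FP is a
 * single-bit mask, cpu_feature(FP) evaluates to its bit index within
 * elf_hwcap, e.g.:
 *
 *	cpu_feature(FP) == ilog2(HWCAP_FP)
 *	cpu_have_feature(cpu_feature(FP))	// true if FP is present
 */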
#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field depends on the implications of
 * the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into four types:
 * LOWER_SAFE, HIGHER_SAFE, HIGHER_OR_ZERO_SAFE and EXACT.
 *
 * The lowest value across all CPUs is chosen for LOWER_SAFE and the highest
 * for HIGHER_SAFE (for HIGHER_OR_ZERO_SAFE, 0 is treated as the highest
 * value). When EXACT is specified, all CPUs are expected to have the same
 * value for the field; failing that, the safe value specified in the table
 * is chosen.
 */

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		visible;
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};
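/*
 * Illustrative sketch, not from the kernel sources: describing a
 * hypothetical 4-bit unsigned CPUID field at bit 20 whose lowest value
 * across all CPUs is the safe one:
 *
 *	{
 *		.sign = FTR_UNSIGNED, .visible = FTR_VISIBLE,
 *		.strict = FTR_STRICT, .type = FTR_LOWER_SAFE,
 *		.shift = 20, .width = 4, .safe_val = 0,
 *	}
 */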
/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @user_mask		Bits which are visible to user space.
 * @sys_val		Safe value across the CPUs (system view)
 * @user_val		Safe value to rely on for user space (user visible view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	const struct arm64_ftr_bits	*ftr_bits;
};
extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
/*
 * We use arm64_cpu_capabilities to represent system features, errata
 * workarounds (both used internally by the kernel and tracked in cpu_hwcaps)
 * and ELF HWCAPs (which are exposed to user space).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes:
 *
 * 1) Scope of Detection: The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g., checking the
 *    value of a field in a CPU ID feature register or checking the CPU
 *    model. The capability provides a callback ( @matches() ) to
 *    perform the check. Scope defines how the checks should be performed.
 *    There are three cases:
 *
 *     a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
 *        matches. This implies we have to run the check on all the
 *        booting CPUs, until the system decides that the state of the
 *        capability is finalised. (See section 2 below.)
 *
 *     b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
 *        match. This implies we run the check only once, when the
 *        system decides to finalise the state of the capability. If the
 *        capability relies on a field in one of the CPU ID feature
 *        registers, we use the sanitised value of the register from the
 *        CPU feature infrastructure to make the decision.
 *
 *     c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
 *        feature. This category is for features that are "finalised"
 *        (or used) by the kernel very early, even before the SMP CPUs
 *        are brought up.
 *
 *    The process of detection is usually denoted by "update" capability
 *    state in the code.
 *
 * 2) Finalise the state: The kernel should finalise the state of a
 *    capability at some point during its execution and take necessary
 *    actions, if any. Usually, this is done after all the boot-time
 *    enabled CPUs are brought up by the kernel, so that it can make a
 *    better decision based on the available set of CPUs. However, there
 *    are some special cases where the action is taken during early
 *    boot by the primary boot CPU (e.g., running the kernel at EL2 with
 *    Virtualisation Host Extensions). The kernel usually disallows any
 *    changes to the state of a capability once it finalises the capability
 *    and takes any action, as it may be impossible to execute the actions
 *    safely. A CPU brought up after a capability is "finalised" is
 *    referred to as a "late CPU" w.r.t. the capability. e.g., all secondary
 *    CPUs are treated as "late CPUs" for capabilities determined by the
 *    boot CPU.
 *
 *    At the moment there are two passes of finalising the capabilities:
 *      a) Boot CPU scope capabilities - Finalised by the primary boot CPU
 *         via setup_boot_cpu_capabilities().
 *      b) Everything except (a) - Run via setup_system_capabilities().
 *
 * 3) Verification: When a CPU is brought online (e.g., by the user or by
 *    the kernel), the kernel should make sure that it is safe to use the
 *    CPU, by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via:
 *
 *      secondary_start_kernel() -> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each newly booted CPU is verified
 *    against the capabilities that have been finalised by the time it
 *    boots:
 *
 *     a) SCOPE_BOOT_CPU: All CPUs are verified against the capability
 *        except for the primary boot CPU.
 *
 *     b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
 *        user after the kernel boot are verified against the capability.
 *
 *    If there is a conflict, the kernel takes an action based on the
 *    severity (e.g., a CPU could be prevented from booting or could cause
 *    a kernel panic). The CPU is allowed to "affect" the state of the
 *    capability if it has not been finalised already. See section 5
 *    for more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g., SCTLR, TCR etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a
 *    later point. The actions are always initiated only after the
 *    capability is finalised. This is usually denoted by "enabling" the
 *    capability. The actions are initiated as follows:
 *
 *     a) Action is triggered on all online CPUs, after the capability is
 *        finalised, invoked within the stop_machine() context from
 *        enable_cpu_capabilities().
 *
 *     b) For any late CPU brought up after (1), the action is triggered
 *        via:
 *
 *          check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations:
 *
 *		x-----------------------------x
 *		| Type  | System   | Late CPU |
 *		|-----------------------------|
 *		|   a   |    y     |    n     |
 *		|-----------------------------|
 *		|   b   |    n     |    y     |
 *		x-----------------------------x
 *
 *    Two separate flag bits are defined to indicate whether each kind of
 *    conflict can be allowed:
 *      ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case (a) is allowed
 *      ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case (b) is allowed
 *
 *    Case (a) is not permitted for a capability that the system requires
 *    all CPUs to have in order for the capability to be enabled. This is
 *    typical for capabilities that represent enhanced functionality.
 *
 *    Case (b) is not permitted for a capability that must be enabled
 *    during boot if any CPU in the system requires it in order to run
 *    safely. This is typical for erratum workarounds that cannot be
 *    enabled after the corresponding capability is finalised.
 *
 *    In some non-typical cases, either both (a) and (b), or neither,
 *    should be permitted. This can be described by including neither
 *    or both flags in the capability's type field.
 */
/*
 * Decide how the capability is detected:
 * on any local CPU vs system wide vs the primary boot CPU.
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * The capability is detected on the boot CPU and is used by the kernel
 * during early boot, i.e., the capability should be "detected" and
 * "enabled" as early as possible on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU		((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	|	\
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

#define SCOPE_SYSTEM	ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU	ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU	ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL	ARM64_CPUCAP_SCOPE_MASK
/*
 * Is it permitted for a late CPU to have this capability when the
 * system hasn't already enabled it?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))

/* Is it safe for a late CPU to miss this capability when the system has it? */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system requires it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on the system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE	\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on a feature of one or more CPUs.
 * All possible conflicts for a late CPU are ignored.
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU
 * is not allowed to have the capability when the system doesn't have it.
 * It is OK for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	u16 type;
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to enable this capability for this CPU.
	 * For each successfully booted CPU, this method is called for each
	 * globally detected capability.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;

		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 min_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};

		/*
		 * A list of "matches/cpu_enable" pairs for the same
		 * "capability" of the same "type" as described by the parent.
		 * Only matches(), cpu_enable() and fields relevant to these
		 * methods are significant in the list. The cpu_enable is
		 * invoked only if the corresponding entry "matches()".
		 * However, if a cpu_enable() method is associated
		 * with multiple matches(), care should be taken that either
		 * the match criteria are mutually exclusive, or that the
		 * method is robust against being called multiple times.
		 */
		const struct arm64_cpu_capabilities *match_list;
	};
};
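/*
 * Illustrative sketch, not a real kernel entry: a hypothetical erratum
 * workaround described with the types above. ARM64_WORKAROUND_EXAMPLE,
 * example_matches() and example_enable() are made-up names:
 *
 *	static const struct arm64_cpu_capabilities example_caps[] = {
 *		{
 *			.desc = "Example erratum workaround",
 *			.capability = ARM64_WORKAROUND_EXAMPLE,
 *			.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *			.matches = example_matches,	// run per CPU (SCOPE_LOCAL_CPU)
 *			.cpu_enable = example_enable,	// applies the workaround
 *		},
 *		{},	// sentinel
 *	};
 */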
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}

static inline bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
{
	return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
}

static inline bool
cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
{
	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
}
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;

bool this_cpu_has_cap(unsigned int cap);
static inline bool cpu_have_feature(unsigned int num)
{
	return elf_hwcap & (1UL << num);
}
/* System capability check for constant caps */
static inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}
static inline bool cpus_have_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return test_bit(num, cpu_hwcaps);
}
static inline bool cpus_have_const_cap(int num)
{
	if (static_branch_likely(&arm64_const_caps_ready))
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);
}
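/*
 * Typical usage (illustrative): hot paths test capabilities through the
 * static-key variant above, which becomes a patched branch once
 * arm64_const_caps_ready is set:
 *
 *	if (cpus_have_const_cap(ARM64_HAS_PAN))
 *		...;	// PAN is guaranteed system-wide
 */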
static inline void cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS) {
		pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
			num, ARM64_NCAPS);
	} else {
		__set_bit(num, cpu_hwcaps);
	}
}
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
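/*
 * Worked example (illustrative): for features = 0x0000000000100000 and a
 * 4-bit unsigned field at bit 20, the two shifts isolate the field:
 *
 *	cpuid_feature_extract_unsigned_field_width(features, 20, 4)
 *		== (features << (64 - 4 - 20)) >> (64 - 4) == 1
 *
 * The signed variant uses an arithmetic right shift instead, which
 * sign-extends the top bit of the field.
 */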
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}
static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}
static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

	return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);

	return val > 0;
}
void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_CSV2_SHIFT);
	return csv2_val == 3;
}
static inline bool supports_clearbhb(int scope)
{
	u64 isar2;

	if (scope == SCOPE_LOCAL_CPU)
		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
	else
		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_CLEARBHB_SHIFT);
}
static inline bool system_supports_32bit_el0(void)
{
	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}

static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_fpsimd(void)
{
	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!cpus_have_const_cap(ARM64_HAS_PAN);
}

static inline bool system_supports_sve(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE) &&
		cpus_have_const_cap(ARM64_SVE);
}
#define ARM64_SSBD_UNKNOWN		-1
#define ARM64_SSBD_FORCE_DISABLE	0
#define ARM64_SSBD_KERNEL		1
#define ARM64_SSBD_FORCE_ENABLE		2
#define ARM64_SSBD_MITIGATED		3
static inline int arm64_get_ssbd_state(void)
{
#ifdef CONFIG_ARM64_SSBD
	extern int ssbd_state;
	return ssbd_state;
#else
	return ARM64_SSBD_UNKNOWN;
#endif
}

void arm64_set_ssbd_mitigation(bool state);
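/*
 * Illustrative usage (hypothetical caller, not from this header): per-CPU
 * mitigation code can key off the global state, e.g.:
 *
 *	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
 *		arm64_set_ssbd_mitigation(true);	// enable on this CPU
 */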
/* Watch out, ordering is important here. */
enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
#endif /* __ASSEMBLY__ */

#endif /* __ASM_CPUFEATURE_H */