4 This tool is for checking the security hardening options of the Linux kernel.
6 Author: Alexander Popov <alex.popov@linux.com>
8 This module contains knowledge for checks.
11 # pylint: disable=missing-function-docstring,line-too-long,invalid-name
12 # pylint: disable=too-many-branches,too-many-statements
14 from .engine import KconfigCheck, CmdlineCheck, SysctlCheck, VersionCheck, OR, AND
def add_kconfig_checks(l, arch):
    """Append the Kconfig hardening checks for the given architecture to `l`.

    Args:
        l: list collecting check objects (mutated in place).
        arch: target architecture string; expected values seen in this function
              are 'X86_64', 'X86_32', 'ARM64' and 'ARM'.

    NOTE(review): this chunk was recovered from a lossy extraction that dropped
    physical lines (unbalanced parentheses and missing `if arch == ...:` guards).
    The dropped continuation lines and guards below were reconstructed from the
    dangling parentheses, the shared helper variables, the trailing comments and
    the arch-exclusive Kconfig option names — confirm against upstream.
    """
    assert(arch), 'empty arch'

    # Calling the KconfigCheck class constructor:
    #     KconfigCheck(reason, decision, name, expected)
    #
    # [!] Don't add CmdlineChecks in add_kconfig_checks() to avoid wrong results
    #     when the tool doesn't check the cmdline.

    # Shared sub-checks reused by several compound (OR/AND) checks below.
    efi_not_set = KconfigCheck('-', '-', 'EFI', 'is not set')
    cc_is_gcc = KconfigCheck('-', '-', 'CC_IS_GCC', 'y') # exists since v4.18
    cc_is_clang = KconfigCheck('-', '-', 'CC_IS_CLANG', 'y') # exists since v4.18

    modules_not_set = KconfigCheck('cut_attack_surface', 'kspp', 'MODULES', 'is not set') # radical, but may be useful in some cases
    devmem_not_set = KconfigCheck('cut_attack_surface', 'kspp', 'DEVMEM', 'is not set') # refers to LOCKDOWN
    bpf_syscall_not_set = KconfigCheck('cut_attack_surface', 'lockdown', 'BPF_SYSCALL', 'is not set') # refers to LOCKDOWN

    # 'self_protection', 'defconfig'
    l += [KconfigCheck('self_protection', 'defconfig', 'BUG', 'y')]
    l += [KconfigCheck('self_protection', 'defconfig', 'SLUB_DEBUG', 'y')]
    l += [KconfigCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
    gcc_plugins_support_is_set = KconfigCheck('self_protection', 'defconfig', 'GCC_PLUGINS', 'y')
    l += [gcc_plugins_support_is_set]
    iommu_support_is_set = KconfigCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y')
    l += [iommu_support_is_set] # is needed for mitigating DMA attacks
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_REGULAR', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_AUTO', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_STRONG', 'y'))]
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR_STRONG', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_STRONG', 'y'))]
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STRICT_KERNEL_RWX', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'DEBUG_RODATA', 'y'))] # before v4.11
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STRICT_MODULE_RWX', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'DEBUG_SET_MODULE_RONX', 'y'),
             modules_not_set)] # DEBUG_SET_MODULE_RONX was before v4.11
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'REFCOUNT_FULL', 'y'),
             VersionCheck((5, 5)))] # REFCOUNT_FULL is enabled by default since v5.5
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')]
    if arch in ('X86_64', 'ARM64', 'ARM'):
        l += [KconfigCheck('self_protection', 'defconfig', 'VMAP_STACK', 'y')]
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('self_protection', 'defconfig', 'SPECULATION_MITIGATIONS', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'DEBUG_WX', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'WERROR', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_INTEL', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_AMD', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RETPOLINE', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'CPU_SRSO', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
        microcode_is_set = KconfigCheck('self_protection', 'defconfig', 'MICROCODE', 'y')
        l += [microcode_is_set] # is needed for mitigating CPU bugs
        # reconstructed: AND(microcode_is_set, ...) restores the dangling parentheses
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'MICROCODE_INTEL', 'y'),
                 AND(microcode_is_set,
                     VersionCheck((6, 6))))] # MICROCODE_INTEL was included in MICROCODE since v6.6
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'MICROCODE_AMD', 'y'),
                 AND(microcode_is_set,
                     VersionCheck((6, 6))))] # MICROCODE_AMD was included in MICROCODE since v6.6
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_SMAP', 'y'),
                 VersionCheck((5, 19)))] # X86_SMAP is enabled by default since v5.19
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_UMIP', 'y'),
                 KconfigCheck('self_protection', 'defconfig', 'X86_INTEL_UMIP', 'y'))]
    if arch in ('ARM64', 'ARM'):
        l += [KconfigCheck('self_protection', 'defconfig', 'IOMMU_DEFAULT_DMA_STRICT', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set
        l += [KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR_PER_TASK', 'y')]
    if arch == 'X86_64': # reconstructed guard: these options exist only on x86_64
        l += [KconfigCheck('self_protection', 'defconfig', 'PAGE_TABLE_ISOLATION', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MEMORY', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_KERNEL_IBT', 'y')]
        l += [AND(KconfigCheck('self_protection', 'defconfig', 'INTEL_IOMMU', 'y'),
                  iommu_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'defconfig', 'AMD_IOMMU', 'y'),
                  iommu_support_is_set)]
    if arch == 'ARM64': # reconstructed guard: ARM64_* options
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PAN', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_EPAN', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'UNMAP_KERNEL_AT_EL0', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_E0PD', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PTR_AUTH_KERNEL', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_BTI_KERNEL', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'MITIGATE_SPECTRE_BRANCH_HISTORY', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_MTE', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MODULE_REGION_FULL', 'y')]
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'),
                 AND(KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'),
                     VersionCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'),
                 VersionCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10
    if arch == 'ARM': # reconstructed guard: 32-bit ARM options
        l += [KconfigCheck('self_protection', 'defconfig', 'CPU_SW_DOMAIN_PAN', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_HISTORY', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'DEBUG_ALIGN_RODATA', 'y')]

    # 'self_protection', 'kspp'
    l += [KconfigCheck('self_protection', 'kspp', 'BUG_ON_DATA_CORRUPTION', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SCHED_STACK_END_CHECK', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SLAB_FREELIST_HARDENED', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SLAB_FREELIST_RANDOM', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SHUFFLE_PAGE_ALLOCATOR', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'FORTIFY_SOURCE', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_LIST', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_VIRTUAL', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_SG', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_CREDENTIALS', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_NOTIFIERS', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'HW_RANDOM_TPM', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'STATIC_USERMODEHELPER', 'y')] # needs userspace support
    kfence_is_set = KconfigCheck('self_protection', 'kspp', 'KFENCE', 'y')
    l += [kfence_is_set]
    l += [AND(KconfigCheck('self_protection', 'my', 'KFENCE_SAMPLE_INTERVAL', 'is not off'),
              kfence_is_set)]
    randstruct_is_set = OR(KconfigCheck('self_protection', 'kspp', 'RANDSTRUCT_FULL', 'y'),
                           KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT', 'y'))
    l += [randstruct_is_set]
    l += [AND(KconfigCheck('self_protection', 'kspp', 'RANDSTRUCT_PERFORMANCE', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT_PERFORMANCE', 'is not set'),
              randstruct_is_set)]
    hardened_usercopy_is_set = KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y')
    l += [hardened_usercopy_is_set]
    l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'),
              hardened_usercopy_is_set)] # usercopy whitelist violations should be prohibited
    l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_PAGESPAN', 'is not set'),
              hardened_usercopy_is_set)] # this debugging for HARDENED_USERCOPY is not needed for security
    l += [AND(KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y'),
              gcc_plugins_support_is_set)]
    # Module signing is moot without modules at all, hence OR(..., modules_not_set).
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG', 'y'),
             modules_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_ALL', 'y'),
             modules_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_SHA512', 'y'),
             modules_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_FORCE', 'y'),
             modules_not_set)] # refers to LOCKDOWN
    l += [OR(KconfigCheck('self_protection', 'kspp', 'INIT_STACK_ALL_ZERO', 'y'),
             KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_STRUCTLEAK_BYREF_ALL', 'y'))]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'INIT_ON_FREE_DEFAULT_ON', 'y'),
             KconfigCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'))]
             # CONFIG_INIT_ON_FREE_DEFAULT_ON was added in v5.3.
             # CONFIG_PAGE_POISONING_ZERO was removed in v5.11.
             # Starting from v5.11 CONFIG_PAGE_POISONING unconditionally checks
             # the 0xAA poison pattern on allocation.
             # That brings higher performance penalty.
    # EFI-only hardening is irrelevant when EFI support is compiled out.
    l += [OR(KconfigCheck('self_protection', 'kspp', 'EFI_DISABLE_PCI_DMA', 'y'),
             efi_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'RESET_ATTACK_MITIGATION', 'y'),
             efi_not_set)] # needs userspace support (systemd)
    ubsan_bounds_is_set = KconfigCheck('self_protection', 'kspp', 'UBSAN_BOUNDS', 'y')
    l += [ubsan_bounds_is_set]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'UBSAN_LOCAL_BOUNDS', 'y'),
             AND(ubsan_bounds_is_set,
                 cc_is_gcc))] # reconstructed: UBSAN_LOCAL_BOUNDS is Clang-only — TODO confirm against upstream
    l += [AND(KconfigCheck('self_protection', 'kspp', 'UBSAN_TRAP', 'y'),
              ubsan_bounds_is_set,
              KconfigCheck('self_protection', 'kspp', 'UBSAN_SHIFT', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_DIV_ZERO', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_UNREACHABLE', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_BOOL', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_ENUM', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_ALIGNMENT', 'is not set'))] # only array index bounds checking with traps
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [AND(KconfigCheck('self_protection', 'kspp', 'UBSAN_SANITIZE_ALL', 'y'),
                  ubsan_bounds_is_set)] # ARCH_HAS_UBSAN_SANITIZE_ALL is not enabled for ARM
        stackleak_is_set = KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_STACKLEAK', 'y')
        l += [AND(stackleak_is_set, gcc_plugins_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'STACKLEAK_METRICS', 'is not set'),
                  stackleak_is_set,
                  gcc_plugins_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'STACKLEAK_RUNTIME_DISABLE', 'is not set'),
                  stackleak_is_set,
                  gcc_plugins_support_is_set)]
        l += [KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y')]
    if arch in ('X86_64', 'ARM64'):
        cfi_clang_is_set = KconfigCheck('self_protection', 'kspp', 'CFI_CLANG', 'y')
        l += [cfi_clang_is_set]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'CFI_PERMISSIVE', 'is not set'),
                  cfi_clang_is_set)]
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('self_protection', 'kspp', 'SCHED_CORE', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '65536')]
        l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set
        l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU_DEFAULT_ON', 'y'),
                  iommu_support_is_set)]
    if arch in ('ARM64', 'ARM'):
        l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_WX', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'WERROR', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '32768')]
        l += [KconfigCheck('self_protection', 'kspp', 'SYN_COOKIES', 'y')] # another reason?
    if arch == 'X86_64': # reconstructed guard: SLS / SVM IOMMU options are x86_64-specific
        l += [KconfigCheck('self_protection', 'kspp', 'SLS', 'y')] # vs CVE-2021-26341 in Straight-Line-Speculation
        l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU_SVM', 'y'),
                  iommu_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'AMD_IOMMU_V2', 'y'),
                  iommu_support_is_set)]
    if arch == 'ARM64': # reconstructed guard: ARM64_* / SCS / MTE-based KASAN
        l += [KconfigCheck('self_protection', 'kspp', 'ARM64_SW_TTBR0_PAN', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'SHADOW_CALL_STACK', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'KASAN_HW_TAGS', 'y')] # see also: kasan=on, kasan.stacktrace=off, kasan.fault=panic
    if arch == 'X86_32': # reconstructed guard: HIGHMEM64G/X86_PAE exist only on 32-bit x86
        l += [KconfigCheck('self_protection', 'kspp', 'PAGE_TABLE_ISOLATION', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'HIGHMEM64G', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'X86_PAE', 'y')]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU', 'y'),
                  iommu_support_is_set)]

    # 'self_protection', 'clipos'
    l += [KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set')]

    # 'security_policy'
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [KconfigCheck('security_policy', 'defconfig', 'SECURITY', 'y')]
    if arch == 'ARM': # reconstructed guard: ARM defconfig doesn't enable SECURITY
        l += [KconfigCheck('security_policy', 'kspp', 'SECURITY', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_YAMA', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LANDLOCK', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_DISABLE', 'is not set')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_BOOTPARAM', 'is not set')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_DEVELOP', 'is not set')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LOCKDOWN_LSM', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LOCKDOWN_LSM_EARLY', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_WRITABLE_HOOKS', 'is not set')] # refers to SECURITY_SELINUX_DISABLE
    l += [OR(KconfigCheck('security_policy', 'my', 'SECURITY_SELINUX', 'y'),
             KconfigCheck('security_policy', 'my', 'SECURITY_APPARMOR', 'y'),
             KconfigCheck('security_policy', 'my', 'SECURITY_SMACK', 'y'),
             KconfigCheck('security_policy', 'my', 'SECURITY_TOMOYO', 'y'))] # one of major LSMs implementing MAC

    # 'cut_attack_surface', 'defconfig'
    l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP', 'y')]
    l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP_FILTER', 'y')]
    l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'BPF_UNPRIV_DEFAULT_OFF', 'y'),
             bpf_syscall_not_set)] # see unprivileged_bpf_disabled
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'STRICT_DEVMEM', 'y'),
                 devmem_not_set)] # refers to LOCKDOWN
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y')] # tsx=off

    # 'cut_attack_surface', 'kspp'
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'SECURITY_DMESG_RESTRICT', 'y')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'ACPI_CUSTOM_METHOD', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_BRK', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'DEVKMEM', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'BINFMT_MISC', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'INET_DIAG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'KEXEC', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'PROC_KCORE', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_PTYS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'HIBERNATION', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'IA32_EMULATION', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_X32', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_X32_ABI', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'MODIFY_LDT_SYSCALL', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'OABI_COMPAT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_MSR', 'is not set')] # refers to LOCKDOWN
    l += [modules_not_set]
    l += [devmem_not_set]
    l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'IO_STRICT_DEVMEM', 'y'),
             devmem_not_set)] # refers to LOCKDOWN
    l += [AND(KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is not set'),
              KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is present'))]
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set')]
        # CONFIG_COMPAT_VDSO disabled ASLR of vDSO only on X86_64 and X86_32;
        # on ARM64 this option has different meaning
    if arch == 'X86_64': # reconstructed guard: vsyscall emulation is x86_64-only — TODO confirm
        l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'X86_VSYSCALL_EMULATION', 'is not set'),
                 KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y'))]
                 # disabling X86_VSYSCALL_EMULATION turns vsyscall off completely,
                 # and LEGACY_VSYSCALL_NONE can be changed at boot time via the cmdline parameter
    if arch == 'ARM': # reconstructed guard: ARM defconfig doesn't enable STRICT_DEVMEM
        l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'STRICT_DEVMEM', 'y'),
                 devmem_not_set)] # refers to LOCKDOWN

    # 'cut_attack_surface', 'grsec'
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'ZSMALLOC_STAT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PAGE_OWNER', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_KMEMLEAK', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'BINFMT_AOUT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'KPROBE_EVENTS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'UPROBE_EVENTS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'GENERIC_TRACER', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'FUNCTION_TRACER', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'STACK_TRACER', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'HIST_TRIGGERS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'BLK_DEV_IO_TRACE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PROC_VMCORE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PROC_PAGE_MONITOR', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'USELIB', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'CHECKPOINT_RESTORE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'USERFAULTFD', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'HWPOISON_INJECT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'MEM_SOFT_DIRTY', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DEVPORT', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_FS', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'NOTIFIER_ERROR_INJECTION', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'FAIL_FUTEX', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PUNIT_ATOM_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'ACPI_CONFIGFS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'EDAC_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DRM_I915_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'BCACHE_CLOSURES_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DVB_C8SECTPFE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'MTD_SLRAM', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'MTD_PHRAM', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'IO_URING', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'KCMP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'RSEQ', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'LATENCYTOP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'KCOV', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PROVIDE_OHCI1394_DMA_INIT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'SUNRPC_DEBUG', 'is not set')]
    l += [AND(KconfigCheck('cut_attack_surface', 'grsec', 'PTDUMP_DEBUGFS', 'is not set'),
              KconfigCheck('cut_attack_surface', 'grsec', 'X86_PTDUMP', 'is not set'))]

    # 'cut_attack_surface', 'maintainer'
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'DRM_LEGACY', 'is not set')] # recommended by Daniel Vetter in /issues/38
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'FB', 'is not set')] # recommended by Daniel Vetter in /issues/38
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'VT', 'is not set')] # recommended by Daniel Vetter in /issues/38
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'BLK_DEV_FD', 'is not set')] # recommended by Denis Efremov in /pull/54
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'BLK_DEV_FD_RAWCMD', 'is not set')] # recommended by Denis Efremov in /pull/62
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'NOUVEAU_LEGACY_CTX_SUPPORT', 'is not set')]
                        # recommended by Dave Airlie in kernel commit b30a43ac7132cdda

    # 'cut_attack_surface', 'clipos'
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'STAGING', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'KSM', 'is not set')] # to prevent FLUSH+RELOAD attack
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'KALLSYMS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'MAGIC_SYSRQ', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'KEXEC_FILE', 'is not set')] # refers to LOCKDOWN (permissive)
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'USER_NS', 'is not set')] # user.max_user_namespaces=0
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_CPUID', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_IOPL_IOPERM', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'ACPI_TABLE_UPGRADE', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'EFI_CUSTOM_SSDT_OVERLAYS', 'is not set')]
#   l += [KconfigCheck('cut_attack_surface', 'clipos', 'IKCONFIG', 'is not set')] # no, IKCONFIG is needed for this check :)

    # 'cut_attack_surface', 'lockdown'
    l += [KconfigCheck('cut_attack_surface', 'lockdown', 'EFI_TEST', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'lockdown', 'MMIOTRACE_TEST', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'lockdown', 'KPROBES', 'is not set')] # refers to LOCKDOWN
    l += [bpf_syscall_not_set] # refers to LOCKDOWN

    # 'cut_attack_surface', 'my'
    l += [KconfigCheck('cut_attack_surface', 'my', 'LEGACY_TIOCSTI', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'MMIOTRACE', 'is not set')] # refers to LOCKDOWN (permissive)
    l += [KconfigCheck('cut_attack_surface', 'my', 'LIVEPATCH', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'IP_DCCP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'IP_SCTP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'FTRACE', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'my', 'VIDEO_VIVID', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'INPUT_EVBUG', 'is not set')] # Can be used as a keylogger
    l += [KconfigCheck('cut_attack_surface', 'my', 'KGDB', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'AIO', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'CORESIGHT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'XFS_SUPPORT_V4', 'is not set')]
    l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'),
             modules_not_set)]
    l += [KconfigCheck('cut_attack_surface', 'my', 'MODULE_FORCE_LOAD', 'is not set')]

    # 'harden_userspace'
    if arch == 'ARM64': # reconstructed guard: userspace PAC/BTI are ARM64-only
        l += [KconfigCheck('harden_userspace', 'defconfig', 'ARM64_PTR_AUTH', 'y')]
        l += [KconfigCheck('harden_userspace', 'defconfig', 'ARM64_BTI', 'y')]
    if arch in ('ARM', 'X86_32'):
        l += [KconfigCheck('harden_userspace', 'defconfig', 'VMSPLIT_3G', 'y')]
    l += [KconfigCheck('harden_userspace', 'clipos', 'COREDUMP', 'is not set')]
    l += [KconfigCheck('harden_userspace', 'my', 'ARCH_MMAP_RND_BITS', 'MAX')] # 'MAX' value is refined using ARCH_MMAP_RND_BITS_MAX
395 def add_cmdline_checks(l, arch):
396 assert(arch), 'empty arch'
398 # Calling the CmdlineCheck class constructor:
399 # CmdlineCheck(reason, decision, name, expected)
401 # [!] Don't add CmdlineChecks in add_kconfig_checks() to avoid wrong results
402 # when the tool doesn't check the cmdline.
404 # [!] Make sure that values of the options in CmdlineChecks need normalization.
405 # For more info see normalize_cmdline_options().
407 # A common pattern for checking the 'param_x' cmdline parameter
408 # that __overrides__ the 'PARAM_X_DEFAULT' kconfig option:
409 # l += [OR(CmdlineCheck(reason, decision, 'param_x', '1'),
410 # AND(KconfigCheck(reason, decision, 'PARAM_X_DEFAULT_ON', 'y'),
411 # CmdlineCheck(reason, decision, 'param_x, 'is not set')))]
413 # Here we don't check the kconfig options or minimal kernel version
414 # required for the cmdline parameters. That would make the checks
415 # very complex and not give a 100% guarantee anyway.
417 # 'self_protection', 'defconfig'
418 l += [CmdlineCheck('self_protection', 'defconfig', 'nosmep', 'is not set')]
419 l += [CmdlineCheck('self_protection', 'defconfig', 'nosmap', 'is not set')]
420 l += [CmdlineCheck('self_protection', 'defconfig', 'nokaslr', 'is not set')]
421 l += [CmdlineCheck('self_protection', 'defconfig', 'nopti', 'is not set')]
422 l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v1', 'is not set')]
423 l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v2', 'is not set')]
424 l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_bhb', 'is not set')]
425 l += [CmdlineCheck('self_protection', 'defconfig', 'nospec_store_bypass_disable', 'is not set')]
426 l += [CmdlineCheck('self_protection', 'defconfig', 'dis_ucode_ldr', 'is not set')]
427 l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nobti', 'is not set')]
428 l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nopauth', 'is not set')]
429 l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nomte', 'is not set')]
430 if arch in ('X86_64', 'X86_32'):
431 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not off'),
432 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
433 CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not set')))]
434 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not off'),
435 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
436 CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not set')))]
437 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not off'),
438 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
439 CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not set')))]
440 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not off'),
441 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
442 CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not set')))]
443 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not off'),
444 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
445 CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not set')))]
446 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not off'),
447 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
448 CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not set')))]
449 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not off'),
450 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
451 CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not set')))]
452 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not off'),
453 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
454 CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not set')))]
455 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not off'),
456 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
457 CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not set')))]
458 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spec_rstack_overflow', 'is not off'),
459 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
460 CmdlineCheck('self_protection', 'defconfig', 'spec_rstack_overflow', 'is not set')))]
461 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'gather_data_sampling', 'is not off'),
462 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
463 CmdlineCheck('self_protection', 'defconfig', 'gather_data_sampling', 'is not set')))]
465 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not off'),
466 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
467 CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not set')))]
468 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'kernel'),
469 CmdlineCheck('self_protection', 'my', 'ssbd', 'force-on'),
470 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
471 CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'is not set')))]
472 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'full'),
473 AND(KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y'),
474 CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set')))]
476 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'on'),
477 CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set'))]
479 # 'self_protection', 'kspp'
480 l += [CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt')]
481 l += [CmdlineCheck('self_protection', 'kspp', 'slab_merge', 'is not set')] # consequence of 'slab_nomerge' by kspp
482 l += [CmdlineCheck('self_protection', 'kspp', 'slub_merge', 'is not set')] # consequence of 'slab_nomerge' by kspp
483 l += [CmdlineCheck('self_protection', 'kspp', 'page_alloc.shuffle', '1')]
484 l += [OR(CmdlineCheck('self_protection', 'kspp', 'slab_nomerge', 'is present'),
485 AND(KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set'),
486 CmdlineCheck('self_protection', 'kspp', 'slab_merge', 'is not set'),
487 CmdlineCheck('self_protection', 'kspp', 'slub_merge', 'is not set')))]
488 l += [OR(CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', '1'),
489 AND(KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y'),
490 CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', 'is not set')))]
491 l += [OR(CmdlineCheck('self_protection', 'kspp', 'init_on_free', '1'),
492 AND(KconfigCheck('self_protection', 'kspp', 'INIT_ON_FREE_DEFAULT_ON', 'y'),
493 CmdlineCheck('self_protection', 'kspp', 'init_on_free', 'is not set')),
494 AND(CmdlineCheck('self_protection', 'kspp', 'page_poison', '1'),
495 KconfigCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'),
496 CmdlineCheck('self_protection', 'kspp', 'slub_debug', 'P')))]
497 l += [OR(CmdlineCheck('self_protection', 'kspp', 'hardened_usercopy', '1'),
498 AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y'),
499 CmdlineCheck('self_protection', 'kspp', 'hardened_usercopy', 'is not set')))]
500 l += [AND(CmdlineCheck('self_protection', 'kspp', 'slab_common.usercopy_fallback', 'is not set'),
501 KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'))]
502 # don't require slab_common.usercopy_fallback=0,
503 # since HARDENED_USERCOPY_FALLBACK was removed in Linux v5.16
504 if arch in ('X86_64', 'ARM64', 'X86_32'):
505 l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.strict', '1'),
506 AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y'),
507 CmdlineCheck('self_protection', 'kspp', 'iommu.strict', 'is not set')))]
508 l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', '0'),
509 AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set'),
510 CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', 'is not set')))]
511 l += [OR(CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', '1'),
512 AND(KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y'),
513 CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', 'is not set')))]
514 if arch in ('X86_64', 'X86_32'):
515 l += [AND(CmdlineCheck('self_protection', 'kspp', 'pti', 'on'),
516 CmdlineCheck('self_protection', 'defconfig', 'nopti', 'is not set'))]
518 # 'self_protection', 'clipos'
519 if arch in ('X86_64', 'X86_32'):
520 l += [CmdlineCheck('self_protection', 'clipos', 'iommu', 'force')]
522 # 'self_protection', 'my'
523 l += [OR(CmdlineCheck('self_protection', 'my', 'kfence.sample_interval', 'is not off'),
524 AND(KconfigCheck('self_protection', 'my', 'KFENCE_SAMPLE_INTERVAL', 'is not off'),
525 CmdlineCheck('self_protection', 'my', 'kfence.sample_interval', 'is not set')))]
527 # 'cut_attack_surface', 'defconfig'
528 if arch in ('X86_64', 'X86_32'):
529 l += [OR(CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'off'),
530 AND(KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y'),
531 CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'is not set')))]
533 # 'cut_attack_surface', 'kspp'
534 l += [CmdlineCheck('cut_attack_surface', 'kspp', 'nosmt', 'is present')] # slow (high performance penalty)
536 l += [OR(CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'none'),
537 KconfigCheck('cut_attack_surface', 'kspp', 'X86_VSYSCALL_EMULATION', 'is not set'),
538 AND(KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y'),
539 CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'is not set')))]
540 l += [OR(CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '1'),
541 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '0'),
542 AND(KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set'),
543 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', 'is not set')))] # the vdso32 parameter must not be 2
545 l += [OR(CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '1'),
546 CmdlineCheck('cut_attack_surface', 'my', 'vdso', '1'),
547 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '0'),
548 CmdlineCheck('cut_attack_surface', 'my', 'vdso', '0'),
549 AND(KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set'),
550 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', 'is not set'),
551 CmdlineCheck('cut_attack_surface', 'my', 'vdso', 'is not set')))] # the vdso and vdso32 parameters must not be 2
553 # 'cut_attack_surface', 'grsec'
554 # The cmdline checks compatible with the kconfig options disabled by grsecurity...
555 l += [OR(CmdlineCheck('cut_attack_surface', 'grsec', 'debugfs', 'off'),
556 KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_FS', 'is not set'))] # ... the end
558 # 'cut_attack_surface', 'my'
559 l += [CmdlineCheck('cut_attack_surface', 'my', 'sysrq_always_enabled', 'is not set')]
562 l += [CmdlineCheck('harden_userspace', 'defconfig', 'norandmaps', 'is not set')]
565 no_kstrtobool_options = [
566 'debugfs', # See debugfs_kernel() in fs/debugfs/inode.c
567 'mitigations', # See mitigations_parse_cmdline() in kernel/cpu.c
568 'pti', # See pti_check_boottime_disable() in arch/x86/mm/pti.c
569 'spectre_v2', # See spectre_v2_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
570 'spectre_v2_user', # See spectre_v2_parse_user_cmdline() in arch/x86/kernel/cpu/bugs.c
571 'spec_store_bypass_disable', # See ssb_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
572 'l1tf', # See l1tf_cmdline() in arch/x86/kernel/cpu/bugs.c
573 'mds', # See mds_cmdline() in arch/x86/kernel/cpu/bugs.c
574 'tsx_async_abort', # See tsx_async_abort_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
575 'srbds', # See srbds_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
576 'mmio_stale_data', # See mmio_stale_data_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
577 'retbleed', # See retbleed_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
578 'rodata', # See set_debug_rodata() in init/main.c
579 'ssbd', # See parse_spectre_v4_param() in arch/arm64/kernel/proton-pack.c
580 'spec_rstack_overflow', # See srso_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
581 'gather_data_sampling', # See gds_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
582 'slub_debug', # See setup_slub_debug() in mm/slub.c
583 'iommu', # See iommu_setup() in arch/x86/kernel/pci-dma.c
584 'vsyscall', # See vsyscall_setup() in arch/x86/entry/vsyscall/vsyscall_64.c
585 'vdso32', # See vdso32_setup() in arch/x86/entry/vdso/vdso32-setup.c
586 'vdso', # See vdso_setup() in arch/x86/entry/vdso/vdso32-setup.c
587 'tsx' # See tsx_init() in arch/x86/kernel/cpu/tsx.c
591 def normalize_cmdline_options(option, value):
592 # Don't normalize the cmdline option values if
593 # the Linux kernel doesn't use kstrtobool() for them
594 if option in no_kstrtobool_options:
597 # Implement a limited part of the kstrtobool() logic
598 if value.lower() in ('1', 'on', 'y', 'yes', 't', 'true'):
600 if value.lower() in ('0', 'off', 'n', 'no', 'f', 'false'):
603 # Preserve unique values
607 # TODO: draft of security hardening sysctls:
608 # what about bpf_jit_enable?
609 # vm.mmap_min_addr has a good value
610 # nosmt sysfs control file
611 # vm.mmap_rnd_bits=max (?)
613 # abi.vsyscall32 (any value except 2)
614 # kernel.oops_limit (think about a proper value)
615 # kernel.warn_limit (think about a proper value)
616 # net.ipv4.tcp_syncookies=1 (?)
618 def add_sysctl_checks(l, arch):
    # Append the SysctlCheck items (runtime kernel.* / vm.* / fs.* / user.* /
    # dev.* / net.* knobs) to the check list 'l'.
619 # This function may be called with arch=None
621 # Calling the SysctlCheck class constructor:
622 # SysctlCheck(reason, decision, name, expected)
    # 'self_protection', 'kspp'
624 l += [SysctlCheck('self_protection', 'kspp', 'net.core.bpf_jit_harden', '2')]
    # 'cut_attack_surface', 'kspp'
626 l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.dmesg_restrict', '1')]
627 l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.perf_event_paranoid', '3')] # with a custom patch, see https://lwn.net/Articles/696216/
628 l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.kexec_load_disabled', '1')]
629 l += [SysctlCheck('cut_attack_surface', 'kspp', 'user.max_user_namespaces', '0')]
630 l += [SysctlCheck('cut_attack_surface', 'kspp', 'dev.tty.ldisc_autoload', '0')]
631 l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.unprivileged_bpf_disabled', '1')]
632 l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.kptr_restrict', '2')]
633 l += [SysctlCheck('cut_attack_surface', 'kspp', 'dev.tty.legacy_tiocsti', '0')]
634 l += [SysctlCheck('cut_attack_surface', 'kspp', 'vm.unprivileged_userfaultfd', '0')]
635 # Before Linux v5.11 this sysctl disabled unprivileged userfaultfd entirely;
636 # since v5.11 the value 0 still allows unprivileged userfaultfd, but for user-mode faults only.
    # 'cut_attack_surface', 'clipos'
638 l += [SysctlCheck('cut_attack_surface', 'clipos', 'kernel.modules_disabled', '1')] # radical, but may be useful in some cases
    # 'harden_userspace', 'kspp'
640 l += [SysctlCheck('harden_userspace', 'kspp', 'fs.protected_symlinks', '1')]
641 l += [SysctlCheck('harden_userspace', 'kspp', 'fs.protected_hardlinks', '1')]
642 l += [SysctlCheck('harden_userspace', 'kspp', 'fs.protected_fifos', '2')]
643 l += [SysctlCheck('harden_userspace', 'kspp', 'fs.protected_regular', '2')]
644 l += [SysctlCheck('harden_userspace', 'kspp', 'fs.suid_dumpable', '0')]
645 l += [SysctlCheck('harden_userspace', 'kspp', 'kernel.randomize_va_space', '2')]
646 l += [SysctlCheck('harden_userspace', 'kspp', 'kernel.yama.ptrace_scope', '3')]