// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>

#include "process.h"

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

		/*
		 * .sp1 is cpu_current_top_of_stack. The init task never
		 * runs user code, but cpu_current_top_of_stack should still
		 * be well defined before the first context switch.
		 */
		.sp1 = TOP_OF_INIT_STACK,

#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	 },
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

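/*
 * Note that the range-designated initializer above, [0 ... IO_BITMAP_LONGS],
 * fills IO_BITMAP_LONGS + 1 longwords -- one more than the bitmap proper.
 * That trailing all-ones word provides the extra terminating byte which,
 * per the comment above, the CPU reads just past the end of the bitmap.
 */
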
DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

/*
 * This gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

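/*
 * For illustration: the two helpers above back the PR_GET_TSC/PR_SET_TSC
 * prctls, so a task can ask for RDTSC to fault, e.g. from userspace:
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// later rdtsc raises SIGSEGV
 */
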
DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}

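/*
 * The per-CPU shadow above caches the last value written to
 * MSR_MISC_FEATURES_ENABLES, so toggling CPUID faulting costs one WRMSR
 * and no RDMSR on the context-switch path.
 */
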
static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}

/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();
}

static inline void switch_to_bitmap(struct thread_struct *prev,
				    struct thread_struct *next,
				    unsigned long tifp, unsigned long tifn)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);

	if (tifn & _TIF_IO_BITMAP) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
		/*
		 * Make sure that the TSS limit is correct for the CPU
		 * to notice the IO bitmap.
		 */
		refresh_tss_limit();
	} else if (tifp & _TIF_IO_BITMAP) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}

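/*
 * Bitmap semantics, for reference: a 0 bit in the TSS I/O bitmap permits
 * the corresponding port, a 1 bit forbids it (hence the memset to 0xff to
 * revoke access). The "128 bytes or less" above is the common ioperm()
 * case: 1024 legacy ports / 8 bits per byte = 128 bytes.
 */
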
#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * Logic is: the first HT sibling to enable SSBD enables it for both
 * siblings in the core, and the last sibling to disable it disables it
 * for the whole core. This is how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
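/*
 * Worked example of the refcount below, on a 2-thread core:
 *   T0 sets SSBD:   disable_state 0 -> 1, MSR written with the SSBD mask.
 *   T1 sets SSBD:   disable_state 1 -> 2, no MSR write needed.
 *   T0 clears SSBD: disable_state 2 -> 1, no MSR write (T1 still needs it).
 *   T1 clears SSBD: disable_state 1 -> 0, MSR written without the mask.
 */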
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		update_spec_ctrl_cond(msr);
}

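/*
 * Example of the XOR logic above: if the previous task ran with TIF_SSBD
 * set and the next one runs without it, tif_diff has the SSBD bit set,
 * updmsr becomes true, and the new MSR value carries the SSBD bit of tifn
 * (clear). If both tasks agree on SSBD and STIBP, no bit differs and the
 * (relatively expensive) MSR write is skipped entirely.
 */
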
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags */
	return task_thread_info(tsk)->flags;
}

void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different. */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}

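/*
 * Note on the ~tif trick above: passing ~tif as the "previous" flags makes
 * every bit of tif_diff appear changed, so __speculation_ctrl_update()
 * re-evaluates SSBD and STIBP unconditionally and rewrites the MSR. The
 * same idiom is used on the forced-update path of __switch_to_xtra().
 */
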
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev, *next;
	unsigned long tifp, tifn;

	prev = &prev_p->thread;
	next = &next_p->thread;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
	switch_to_bitmap(prev, next, tifp, tifn);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}
}

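/*
 * The BLOCKSTEP update above moves a thread flag into an MSR bit without a
 * branch: (msk >> TIF_BLOCKSTEP) normalizes the _TIF_BLOCKSTEP bit of tifn
 * to bit 0, and << DEBUGCTLMSR_BTF_SHIFT places it at the BTF (single-step
 * on branches) position of IA32_DEBUGCTL.
 */
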
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();
	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite; they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}

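/*
 * How the wakeup works: MONITOR arms the address-range monitor on this
 * task's thread-info flags word, and MWAIT then stalls until that cache
 * line is written. A remote CPU setting TIF_NEED_RESCHED (via the polling
 * protocol entered through current_set_polling_and_test()) therefore wakes
 * this CPU without an IPI.
 */
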
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the "idle=halt" boot option is used, halt is
		 * forced for CPU idle, so the CPU C2/C3 states won't
		 * be used again. boot_option_idle_override is still
		 * set so that the CPU idle driver can continue to
		 * load.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * When the "idle=nomwait" boot option is used, mwait
		 * is disabled for the CPU C2/C3 states. Unlike
		 * "idle=halt", this path leaves x86_idle alone and
		 * only sets boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

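/*
 * Usage examples for the "idle=" parameter handled above: "idle=poll"
 * busy-polls instead of idling (see the HT warning in
 * select_idle_routine()), "idle=halt" forces HLT, and "idle=nomwait"
 * keeps MWAIT out of the C-state path.
 */
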
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}

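/*
 * Arithmetic of the two helpers above: the stack start is lowered by a
 * random 0-8191 bytes and then rounded down to 16 bytes to keep ABI
 * alignment, giving 512 distinct placements; the brk base is randomized
 * within a 0x02000000-byte (32 MiB) window starting at mm->brk.
 */
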
/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}

long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long cpuid_enabled)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, cpuid_enabled);
	}

	return -EINVAL;
}

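/*
 * For illustration: from userspace these options are reached via
 * arch_prctl(2), e.g.
 *
 *	arch_prctl(ARCH_SET_CPUID, 0);	// CPUID now raises SIGSEGV
 *	arch_prctl(ARCH_GET_CPUID, 0);	// returns 0 while faulting is on
 *
 * which only succeeds on hardware with X86_FEATURE_CPUID_FAULT (see
 * set_cpuid_mode() above).
 */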