 * Low-level exception handling code
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter

	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	.macro kernel_ventry, el, label, regsize = 64
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
.Lskip_tramp_vectors_cleanup\@:
	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
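	/*
	 * The add/sub pairs above keep both the original SP and the original
	 * x0 recoverable from (sp', x0'), so the overflow test needs no
	 * scratch register. With stacks aligned as described above, bit
	 * THREAD_SHIFT of a valid SP is clear; the S_FRAME_SIZE reservation
	 * can only set it by running off the bottom of the stack.
	 */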
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */

	/* Recover the original x0 value and stash it in tpidrro_el0 */

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer
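	// The masked delta is zero (so b.ne is not taken) only when the
	// interrupted SP already lay within this CPU's overflow stack.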
	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?

	.macro tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
	adr_l	\tmp, .entry.tramp.text
	// This macro corrupts x0-x3. It is the caller's duty
	// to save/restore them if required.
	.macro	apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	.L__asm_ssbd_skip\@
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
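	// At patch time, arm64_update_smccc_conduit() rewrites this NOP into
	// "smc #0" or "hvc #0" to match the SMCCC conduit the firmware uses.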
	.macro	kernel_entry, el, regsize = 64
	mov	w0, w0				// zero upper 32 bits of x0
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
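	// The interrupted context's x0-x29 now sit in the pt_regs frame that
	// kernel_ventry reserved at sp (offsets match struct pt_regs.regs[]).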
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	apply_ssbd 1, x22, x23

	add	x21, sp, #S_FRAME_SIZE

	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */

	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	stp	x29, x22, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	__uaccess_ttbr0_disable x21

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	str	w21, [sp, #S_SYSCALLNO]

	/*
	 * Set sp_el0 to current thread_info.
	 * Registers that may be useful after this macro is invoked:
	 * x23 - aborted PSTATE
	 */

	.macro	kernel_exit, el
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to restore UAO, it will be restored from SPSR_EL1 */

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 */
	bl	post_ttbr_update_workaround
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit

	ldr	x23, [sp, #S_SP]		// load return stack pointer

	tst	x22, #PSR_MODE32_BIT		// native task?

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
	msr	contextidr_el1, xzr
alternative_else_nop_endif

	msr	elr_el1, x21			// set up the return data

	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on eret context synchronization
	 * when returning from IPI handler, and when returning to user-space.
	 */
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	add	sp, sp, #S_FRAME_SIZE		// restore sp
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	x30, tramp_exit_native, x29
	tramp_alias	x30, tramp_exit_compat, x29

	add	sp, sp, #S_FRAME_SIZE		// restore sp
	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	and	x25, x25, #~(THREAD_SIZE - 1)

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE

	/* switch to the irq stack */

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit

	/*
	 * These are the registers used in the syscall handler, and allow us to
	 * have in theory up to 7 arguments to a function - x0 to x6.
	 *
	 * x7 is reserved for the system call number in 32-bit mode.
	 */
wsc_nr	.req	w25		// number of system calls
xsc_nr	.req	x25		// number of system calls (zero-extended)
wscno	.req	w26		// syscall number
xscno	.req	x26		// syscall number (zero-extended)
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	ldr_l	x1, handle_arch_irq
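	// handle_arch_irq is the function pointer installed by the interrupt
	// controller driver (e.g. the GIC driver) via set_handle_irq().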
	.pushsection ".entry.text", "ax"

	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0

	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 */

	/* Restore the original x0 value */
	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	add	x0, x0, #S_FRAME_SIZE

	/* Stash the regs for handle_bad_stack */

#endif /* CONFIG_VMAP_STACK */
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize

	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)
ENDPROC(el0_irq_invalid)
ENDPROC(el0_fiq_invalid)
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)
ENDPROC(el1_irq_invalid)
ENDPROC(el1_fiq_invalid)
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	/*
	 * Fall through to the Data abort case
	 */

	/*
	 * Data abort handling
	 */
	inherit_daif	pstate=x23, tmp=x2
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	/*
	 * Stack or PC alignment exception handling
	 */
	inherit_daif	pstate=x23, tmp=x2

	/*
	 * Undefined instruction
	 */
	inherit_daif	pstate=x23, tmp=x2

	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
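	// EC values for debug exceptions taken from the current EL (breakpoint,
	// software step, watchpoint) are all odd; BRK64 (0x3c) is even, so the
	// cinc above folds it into the odd set before the bit-0 test.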
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	// TODO: add support for undefined instructions in kernel mode
	inherit_daif	pstate=x23, tmp=x2
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_PREEMPT
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0

	bl	el0_svc_compat_handler
	/*
	 * Data abort handling
	 */
	clear_address_tag x0, x26

	/*
	 * Instruction abort handling
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
	bl	do_el0_ia_bp_hardening

	/*
	 * Floating Point or Advanced SIMD access
	 */

	/*
	 * Scalable Vector Extension access
	 */

	/*
	 * Floating Point, Advanced SIMD or SVE exception
	 */

	/*
	 * Stack or PC alignment exception handling
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off

	/*
	 * Undefined instruction
	 */

	/*
	 * System instructions, for trapped cache maintenance instructions
	 */

	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
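	// Converse of the EL1 check above: EC values for debug exceptions
	// taken from EL0 are even, so an odd value cannot be an EL0 debug trap.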
	bl	do_debug_exception

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	bl	do_el0_irq_bp_hardening
#ifdef CONFIG_TRACE_IRQFLAGS

	/*
	 * Ok, we need to do extra processing, enter the slow path.
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step

/*
 * "slow" syscall return path.
 */
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK

	.popsection				// .entry.text
	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
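	// Under KPTI, ASIDs are allocated in pairs: the user half has the low
	// ASID bit (USER_ASID_FLAG) set, the kernel half has it clear. Clearing
	// the flag pairs the swapper page tables with the kernel ASID.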
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.macro tramp_unmap_kernel, tmp
	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 */

	.macro tramp_data_page	dst
	adr_l	\dst, .entry.tramp.text
	sub	\dst, \dst, PAGE_SIZE
	.macro tramp_data_read_var	dst, var
#ifdef CONFIG_RANDOMIZE_BASE
	add	\dst, \dst, #:lo12:__entry_tramp_data_\var

#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3
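// These select how tramp_ventry mitigates Spectre-BHB: LOOP runs the
// branch-history clearing loop, FW makes the firmware call
// (ARM_SMCCC_ARCH_WORKAROUND_3), INSN executes the clearbhb instruction,
// and NONE emits no mitigation.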
	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	msr	tpidrro_el0, x30	// Restored in kernel_ventry

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch, i.e. the
	 * ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP
	.if	\bhb == BHB_MITIGATION_INSN
	.endif // \bhb == BHB_MITIGATION_INSN
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
	prfm	plil1strm, [x30, #(1b - \vector_start)]
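	// Preload the matching slot of the full-fat vectors so the upcoming
	// branch into it is less likely to miss in the instruction side.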
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch,
	 * i.e. the ret out of tramp_ventry, but it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW
	add	x30, x30, #(1b - \vector_start + 4)
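	// The +4 skips the first instruction of the matching kernel_ventry
	// slot: that instruction is the branch over the trampoline-cleanup
	// code, so landing past it makes the cleanup run (see kernel_ventry).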
.org 1b + 128	// Did we overflow the ventry slot?

	.macro tramp_exit, regsize = 64
	tramp_data_read_var	x30, this_cpu_vector
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	tramp_unmap_kernel	x29
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	.macro	generate_tramp_vector,	kpti, bhb
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"

ENTRY(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE

ENTRY(tramp_exit_native)
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
END(tramp_exit_compat)

	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
__entry_tramp_data_vectors:
#ifdef CONFIG_ARM_SDE_INTERFACE
__entry_tramp_data___sdei_asm_handler:
	.quad	__sdei_asm_handler
#endif /* CONFIG_ARM_SDE_INTERFACE */
__entry_tramp_data_this_cpu_vector:
	.quad	this_cpu_vector
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
ENTRY(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
END(__bp_harden_el1_vectors)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
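	// THREAD_CPU_CONTEXT is the asm-offsets constant for
	// offsetof(struct task_struct, thread.cpu_context).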
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	cbz	x19, 1f				// not a kernel thread
1:	get_thread_info tsk
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

	.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
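	// Choose the conduit for the completion call: SMC when the event was
	// registered through the SMC conduit, HVC otherwise.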
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 */
	.pushsection ".entry.tramp.text", "ax"
ENTRY(__sdei_asm_entry_trampoline)
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4

	/*
	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
	 * the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
	tramp_data_read_var	x4, __sdei_asm_handler
ENDPROC(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x4: struct sdei_registered_event argument from registration time.
 */
ENTRY(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
ENDPROC(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
	.popsection				// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
 * Software Delegated Exception entry point.
 *
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 */
ENTRY(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1

	/* If we interrupted the kernel, point to the previous stack/frame. */
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero
	stp	x29, x4, [sp, #-16]!
	add	x0, x19, #SDEI_EVENT_INTREGS

	/* restore regs >x17 that we clobbered */
	mov	x4, x19		// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
ENDPROC(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */