/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro restore_daif, flags:req
	msr	daif, \flags
	.endm

	/* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
	.macro enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint	#16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Clear Branch History instruction
 */
	.macro	clearbhb
	hint	#22
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm
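
/*
 * For example (illustrative only, register choices are placeholders), a
 * bounds-checked table lookup could clamp the index before the dependent load:
 *
 *	cmp	x1, x2			// index vs. number of entries
 *	b.hs	1f			// architecturally out of bounds
 *	mask_nospec64 x1, x2, x9	// zero x1 if speculatively >= x2
 *	ldr	x0, [x3, x1, lsl #3]
 * 1:
 */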

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
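
/*
 * For example (illustrative), a user access can route faults to a local
 * fixup label via the exception table:
 *
 *	USER(9998f, ldr	w2, [x1])	// x1 is a user pointer; may fault
 *	...
 * 9998:
 *	mov	x0, #-EFAULT		// reached only on a faulting access
 */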

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
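
/*
 * e.g. (illustrative) assembling a 64-bit value whose low/high halves
 * arrive in two registers:
 *
 *	regs_to_64	x4, x0, x1	// x4 = (high << 32) | low
 */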

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
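
/*
 * Illustrative usage (the symbol names below are placeholders, not symbols
 * defined by this header):
 *
 *	adr_l	x0, some_sym		// x0 = &some_sym
 *	ldr_l	w1, some_u32_sym, x2	// w1 = some_u32_sym, x2 is scratch
 *	str_l	x3, some_u64_sym, x4	// some_u64_sym = x3, x4 is scratch
 */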

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
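
/*
 * e.g. (illustrative; my_pcpu_var is a placeholder per-cpu symbol):
 *
 *	adr_this_cpu	x0, my_pcpu_var, x1	// x0 = this CPU's &my_pcpu_var
 *	ldr_this_cpu	x2, my_pcpu_var, x3	// x2 = this CPU's my_pcpu_var
 */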

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
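
/*
 * For example (illustrative), cleaning a buffer at x0 of length x1 to the
 * point of coherency (note that kaddr and size are corrupted):
 *
 *	dcache_by_line_op cvac, sy, x0, x1, x2, x3
 */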

/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 *	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 *	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
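
/*
 * e.g. (illustrative) copying the page at x1 to x0, with x2-x9 as scratch;
 * both pointers are advanced by PAGE_SIZE:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */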

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
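
/*
 * e.g. (illustrative) loading a 64-bit constant without a literal pool:
 *
 *	mov_q	x0, 0x0000ffffdeadbeef	// expands to a movz/movk sequence
 */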

/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 *	phys:	physical address, preserved
 *	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm

/*
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately prior
 * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm
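
	/*
	 * Illustrative usage in an assembly routine that needs x19-x22 plus
	 * 16 bytes of local stack space (the function name is a placeholder):
	 *
	 * ENTRY(my_asm_helper)
	 *	frame_push	4, 16
	 *	...
	 *	frame_pop
	 *	ret
	 * ENDPROC(my_asm_helper)
	 */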

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm

/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section; any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1, in which case disabling
 *   preemption once will make the task preemptible. If this is not the case,
 *   yielding is pointless.
 * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
 *   kernel mode NEON (which will trigger a reschedule), and branch to the
 *   yield fixup code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_thread_info	x0
	ldr		w1, [x0, #TSK_TI_PREEMPT]
	ldr		x0, [x0, #TSK_TI_FLAGS]
	cmp		w1, #PREEMPT_DISABLE_OFFSET
	csel		x0, x0, xzr, eq
	tbnz		x0, #TIF_NEED_RESCHED, .Lyield_\@	// needs rescheduling?
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm

	.macro __mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb	spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	/* Save/restores x0-x3 to the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

#endif	/* __ASM_ASSEMBLER_H */