/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/v7m.h>

#include "efi-header.S"
 AR_CLASS(	.arch	armv7-a	)
 M_CLASS(	.arch	armv7-m	)
/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG
#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm
#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc, val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex, val, len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm
		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm
		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		/*
		 * Always enter in ARM state for CPUs that support the ARM ISA.
		 * As of today (2014) that's exactly the members of the A and R
		 * classes.
		 */
 AR_CLASS(	.arm	)
start:
		.type	start,#function
		.rept	7
		__nop
		.endr
#ifndef CONFIG_THUMB2_KERNEL
		mov	r0, r0
#else
 AR_CLASS(	sub	pc, pc, #3	)	@ A/R: switch to Thumb2 mode
  M_CLASS(	nop.w	)			@ M: already in Thumb2 mode
		.thumb
#endif
		W(b)	1f
		.word	_magic_sig	@ Magic numbers to help the loader
		.word	_magic_start	@ absolute load/run zImage address
		.word	_magic_end	@ zImage end address
		.word	0x04030201	@ endianness flag

 THUMB(		.thumb	)
1:		__EFI_HEADER
 ARM_BE8(	setend	be	)		@ go BE8 if compiled for BE8
 AR_CLASS(	mrs	r9, cpsr	)
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer
#ifndef CONFIG_CPU_V7M
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab	)		@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
#endif
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text
#ifdef CONFIG_AUTO_ZRELADDR
		/*
		 * Find the start of physical memory.  As we are executing
		 * without the MMU on, we are in the physical address space.
		 * We just need to get rid of any offset by aligning the
		 * address.
		 *
		 * This alignment is a balance between the requirements of
		 * different platforms - we have chosen 128MB to allow
		 * platforms which align the start of their physical memory
		 * to 128MB to use this feature, while allowing the zImage
		 * to be placed within the first 128MB of memory on other
		 * platforms.  Increasing the alignment means we place
		 * stricter alignment requirements on the start of physical
		 * memory, but relaxing it means that we break people who
		 * are already placing their zImage in (eg) the top 64MB
		 * of this range.
		 */
		mov	r4, pc
		and	r4, r4, #0xf8000000
		/* Determine final kernel image address. */
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif
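		/*
		 * For example, a zImage executing at 0x60a00040 yields
		 * r4 = 0x60000000 + TEXT_OFFSET: the bottom 27 bits of pc
		 * are discarded, i.e. we round down to a 128MB boundary.
		 */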
		/*
		 * Set up a page table only if it won't overwrite ourself.
		 * That means r4 < pc || r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most infrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, LC0+32
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on
restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location
		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24
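		/*
		 * r9 now holds the inflated size.  The byte-wise loads
		 * avoid an unaligned word access and read the value
		 * correctly regardless of the CPU's data endianness.
		 */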
#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif
		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = final kernel address (possibly with LSB set)
 *   r5  = appended dtb size (still unknown)
 *   r6  = _edata
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found
#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here. No GOT fixup has occurred
		 * yet, but none of the code we're about to call uses any
		 * global variable.
		 */

		/* Get the initial DTB size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif
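		/*
		 * The eor/bic/ror/eor sequence above is the classic
		 * pre-ARMv6 32-bit byte swap (rev is not available on all
		 * CPUs this code runs on): it exchanges bytes 0<->3 and
		 * 1<->2, converting the big-endian DTB size field.
		 */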
		/* 50% DTB growth should be good enough */
		add	r5, r5, r5, lsr #1
		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7
		/* clamp to 32KB min and 1MB max */
		cmp	r5, #(1 << 15)
		movlo	r5, #(1 << 15)
		cmp	r5, #(1 << 20)
		movhi	r5, #(1 << 20)
		/* temporarily relocate the stack past the DTB work space */
		add	sp, sp, r5

		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		mov	r2, r5
		bl	atags_to_fdt
		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1		@ clear the LSB (cache_on flag)
		add	r0, r0, #0x100
		mov	r1, r6
		mov	r2, r5
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, r5
#endif
		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area. To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1
		/* Get the current DTB size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif
/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite
/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added. This avoids overwriting
		 * ourself when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255
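		/*
		 * Example: if the destination were only a few bytes above
		 * us, a straight copy could overwrite the copy loop itself;
		 * padding the destination by the relocator's own size,
		 * rounded up to a 256-byte granule, rules that out.
		 */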
		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

		/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		/*
		 * Compute the address of the hyp vectors after relocation,
		 * and call __hyp_set_vectors with the new address so that
		 * we can HVC again after the copy.
		 */
0:		adr	r0, 0b
		movw	r1, #:lower16:__hyp_stub_vectors - 0b
		movt	r1, #:upper16:__hyp_stub_vectors - 0b
		add	r0, r0, r1
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif
		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b
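		/*
		 * Each pass moves 32 bytes (eight registers) from the top
		 * of the image downwards; copying high-to-low is what makes
		 * the overlapping, higher destination safe.
		 */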
		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		badr	r0, restart
		add	r0, r0, r6
		mov	pc, r0
wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0
#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b
		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else
		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif
not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b
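		/*
		 * The loop above zeroes BSS sixteen bytes per pass; in the
		 * non-ZBOOT_ROM path r2/r3 were already bumped past the
		 * appended DTB, so the DTB is never clobbered here.
		 */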
		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on
/*
 * The C runtime environment should now be setup sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
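		/*
		 * decompress_kernel(output_start, free_mem_ptr,
		 * free_mem_ptr_end, arch_id) used the 64k window between
		 * r1 and r2 as its malloc arena; r1 and r2 are then
		 * reloaded with the boot parameters the kernel expects.
		 */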
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif
		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.word	_end - restart + 16384 + 1024*1024
		.size	LC0, . - LC0
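/*
 * The ldmia at restart pulls the first seven words of LC0 into
 * {r1, r2, r3, r6, r10, r11, r12}, the stack pointer is loaded
 * separately from LC0+28, and LC0+32 is the worst-case headroom
 * figure consumed by the overlap check after the AUTO_ZRELADDR code.
 */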
#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif
/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn
/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr
__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)
@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on
__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12
__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #(7 << 0) | (1 << 4)	@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12
__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12
__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif
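		/*
		 * "r0, lsr #32" always evaluates to 0, so this is a plain
		 * return; routing the just-read control register value
		 * through the branch forces the pipeline to drain, so the
		 * MMU/cache enable has taken effect before we return.
		 */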
#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */
call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#elif defined(CONFIG_CPU_V7M)
		/*
		 * On v7-M the processor id is located in the V7M_SCB_CPUID
		 * register, but as cache handling is IMPLEMENTATION DEFINED
		 * on v7-M (if existent at all) we just return early here.
		 * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
		 * __armv7_mmu_cache_{on,off,flush}) would be selected which
		 * use cp15 registers that are not implemented on v7-M.
		 */
		bx	lr
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3	)	@ call cache function
 THUMB(		addeq	r12, r3		)
 THUMB(		moveq	pc, r12		)	@ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b
/*
 * Table for cache operations.  This is basically:
 *   - CPU ID match
 *   - CPU ID mask
 *   - 'cache on' method instruction
 *   - 'cache off' method instruction
 *   - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop	)
		mov	pc, lr
 THUMB(		nop	)
		mov	pc, lr
 THUMB(		nop	)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop	)
		mov	pc, lr
 THUMB(		nop	)
		mov	pc, lr
 THUMB(		nop	)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop	)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop	)
		mov	pc, lr
 THUMB(		nop	)
		mov	pc, lr
 THUMB(		nop	)
		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif
		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush
		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop	)
		mov	pc, lr
 THUMB(		nop	)
		mov	pc, lr
 THUMB(		nop	)
		.size	proc_types, . - proc_types
		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif
/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn
__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr
__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr
__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12
/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn
__armv4_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr
__fa526_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv6_mmu_cache_flush:
		mov	r1, #0
		tst	r4, #1
		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv7_mmu_cache_flush:
		tst	r4, #1
		bne	iflush
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr & csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5	)
 THUMB(		orr	r11, r10, r6	)	@ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2	)
 THUMB(		orr	r11, r11, r6	)	@ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr
__armv5tej_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv4_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
 THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
 THUMB(		add	r1, r1, r11	)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
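		/*
		 * Note the software clean above: with no reliable way to
		 * clean by set/way on these cores, reading a buffer twice
		 * the D-cache size forces every dirty line to be evicted
		 * and written back before the caches are invalidated.
		 */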
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b
@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000		@ busy-wait for the UART
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		loadsp	r3, r1
		mov	r0, #0
		b	2b
@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg
#ifdef CONFIG_ARM_VIRT_EXT
		.align	5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */
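/*
 * Final hand-off, per the ARM Linux boot protocol: r0 must be 0,
 * r1 holds the machine type and r2 the atags/DTB pointer (both
 * restored just before the hyp-mode check above); r4 is the kernel
 * entry point, always entered in ARM state on A/R-class cores.
 */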
__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4	)		@ call kernel
 M_CLASS(	add	r4, r4, #1	)	@ enter in Thumb mode for M class
 THUMB(		bx	r4	)		@ entry point is always ARM for A/R classes

reloc_code_end:
#ifdef CONFIG_EFI_STUB
		.align	2
_start:		.long	start - .

ENTRY(efi_stub_entry)
		@ allocate space on stack for passing current zImage address
		@ and for the EFI stub to return the new entry point of the
		@ zImage, as the EFI stub may copy the kernel.  The pointer
		@ address is passed in r2; r0 and r1 are passed through from
		@ the EFI firmware to efi_entry()
		adr	ip, 1f
		ldr	r3, [ip]
		add	r3, r3, ip
		stmfd	sp!, {r3, lr}
		mov	r2, sp			@ pass zImage address in r2
		bl	efi_entry

		@ Check for error return from EFI stub. r0 has FDT address
		cmn	r0, #1
		beq	efi_load_fail

		@ Preserve return value of efi_entry() in r4
		mov	r4, r0

		@ our cache maintenance code relies on CP15 barrier instructions
		@ but since we arrived here with the MMU and caches configured
		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
		@ the enable path will be executed on v7+ only.
		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
		tst	r1, #(1 << 5)		@ CP15BEN bit set?
		bne	0f
		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
 ARM(		.inst	0xf57ff06f	)	@ v7+ isb
 THUMB(		isb	)

0:		bl	cache_clean_flush
		bl	cache_off

		@ Set parameters for booting zImage according to boot protocol
		@ put FDT address in r2, it was returned by efi_entry()
		@ r1 is the machine type, and r0 needs to be 0
		mov	r0, #0
		mov	r1, #0xFFFFFFFF
		mov	r2, r4

		@ Branch to (possibly) relocated zImage that is in [sp]
		ldr	lr, [sp]
		ldr	ip, =start_offset
		add	lr, lr, ip
		mov	pc, lr			@ no mode switch

efi_load_fail:
		@ Return EFI_LOAD_ERROR to EFI firmware on error.
		ldr	r0, =0x80000001
		ldmfd	sp!, {ip, pc}
ENDPROC(efi_stub_entry)

		.align	2
1:		.long	start - .
#endif
		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: