1 /*
2  * OpenRISC head.S
3  *
4  * Linux architectural port borrowing liberally from similar works of
5  * others.  All original copyrights apply as per the original source
6  * declaration.
7  *
8  * Modifications for the OpenRISC architecture:
9  * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
10  * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
11  *
12  *      This program is free software; you can redistribute it and/or
13  *      modify it under the terms of the GNU General Public License
14  *      as published by the Free Software Foundation; either version
15  *      2 of the License, or (at your option) any later version.
16  */
17
18 #include <linux/linkage.h>
19 #include <linux/threads.h>
20 #include <linux/errno.h>
21 #include <linux/init.h>
22 #include <linux/serial_reg.h>
23 #include <asm/processor.h>
24 #include <asm/page.h>
25 #include <asm/mmu.h>
26 #include <asm/pgtable.h>
27 #include <asm/thread_info.h>
28 #include <asm/cache.h>
29 #include <asm/spr_defs.h>
30 #include <asm/asm-offsets.h>
31 #include <linux/of_fdt.h>
32
33 #define tophys(rd,rs)                           \
34         l.movhi rd,hi(-KERNELBASE)              ;\
35         l.add   rd,rd,rs
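/*
 * A worked example, assuming the usual OpenRISC KERNELBASE of
 * 0xc0000000: hi(-KERNELBASE) == 0x4000, so l.movhi sets rd to
 * 0x40000000 and the add wraps a kernel virtual address down to its
 * physical address, e.g. 0xc0001234 + 0x40000000 == 0x00001234.
 * A single l.movhi suffices only because the low 16 bits of
 * -KERNELBASE are zero.
 */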
36
37 #define CLEAR_GPR(gpr)                          \
38         l.movhi gpr,0x0
39
40 #define LOAD_SYMBOL_2_GPR(gpr,symbol)           \
41         l.movhi gpr,hi(symbol)                  ;\
42         l.ori   gpr,gpr,lo(symbol)
43
44
45 #define UART_BASE_ADD      0x90000000
46
47 #define EXCEPTION_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
48 #define SYSCALL_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
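/*
 * EXCEPTION_SR: both MMUs (DME/IME) and caches (DCE/ICE) enabled,
 * supervisor mode (SM), external interrupt (IEE) and tick timer (TEE)
 * exceptions still masked; SYSCALL_SR additionally re-enables IEE/TEE.
 */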
49
50 /* ============================================[ tmp store locations ]=== */
51
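/*
 * All of the store/load macros below use fixed low physical addresses
 * (offsets from r0, which always reads as zero) as scratch slots: at
 * exception entry there is no usable kernel stack yet and the MMU is
 * off.  A single set of slots suffices because exceptions are not
 * reentrant here (see the NOTE in the EXCEPTION_HANDLE description
 * below).
 */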
52 /*
53  * emergency_print temporary stores
54  */
55 #define EMERGENCY_PRINT_STORE_GPR4      l.sw    0x20(r0),r4
56 #define EMERGENCY_PRINT_LOAD_GPR4       l.lwz   r4,0x20(r0)
57
58 #define EMERGENCY_PRINT_STORE_GPR5      l.sw    0x24(r0),r5
59 #define EMERGENCY_PRINT_LOAD_GPR5       l.lwz   r5,0x24(r0)
60
61 #define EMERGENCY_PRINT_STORE_GPR6      l.sw    0x28(r0),r6
62 #define EMERGENCY_PRINT_LOAD_GPR6       l.lwz   r6,0x28(r0)
63
64 #define EMERGENCY_PRINT_STORE_GPR7      l.sw    0x2c(r0),r7
65 #define EMERGENCY_PRINT_LOAD_GPR7       l.lwz   r7,0x2c(r0)
66
67 #define EMERGENCY_PRINT_STORE_GPR8      l.sw    0x30(r0),r8
68 #define EMERGENCY_PRINT_LOAD_GPR8       l.lwz   r8,0x30(r0)
69
70 #define EMERGENCY_PRINT_STORE_GPR9      l.sw    0x34(r0),r9
71 #define EMERGENCY_PRINT_LOAD_GPR9       l.lwz   r9,0x34(r0)
72
73
74 /*
75  * TLB miss handlers temporary stores
76  */
77 #define EXCEPTION_STORE_GPR9            l.sw    0x10(r0),r9
78 #define EXCEPTION_LOAD_GPR9             l.lwz   r9,0x10(r0)
79
80 #define EXCEPTION_STORE_GPR2            l.sw    0x64(r0),r2
81 #define EXCEPTION_LOAD_GPR2             l.lwz   r2,0x64(r0)
82
83 #define EXCEPTION_STORE_GPR3            l.sw    0x68(r0),r3
84 #define EXCEPTION_LOAD_GPR3             l.lwz   r3,0x68(r0)
85
86 #define EXCEPTION_STORE_GPR4            l.sw    0x6c(r0),r4
87 #define EXCEPTION_LOAD_GPR4             l.lwz   r4,0x6c(r0)
88
89 #define EXCEPTION_STORE_GPR5            l.sw    0x70(r0),r5
90 #define EXCEPTION_LOAD_GPR5             l.lwz   r5,0x70(r0)
91
92 #define EXCEPTION_STORE_GPR6            l.sw    0x74(r0),r6
93 #define EXCEPTION_LOAD_GPR6             l.lwz   r6,0x74(r0)
94
95
96 /*
97  * EXCEPTION_HANDLE temporary stores
98  */
99
100 #define EXCEPTION_T_STORE_GPR30         l.sw    0x78(r0),r30
101 #define EXCEPTION_T_LOAD_GPR30(reg)     l.lwz   reg,0x78(r0)
102
103 #define EXCEPTION_T_STORE_GPR10         l.sw    0x7c(r0),r10
104 #define EXCEPTION_T_LOAD_GPR10(reg)     l.lwz   reg,0x7c(r0)
105
106 #define EXCEPTION_T_STORE_SP            l.sw    0x80(r0),r1
107 #define EXCEPTION_T_LOAD_SP(reg)        l.lwz   reg,0x80(r0)
108
109 /*
110  * For UNHANDLED_EXCEPTION
111  */
112
113 #define EXCEPTION_T_STORE_GPR31         l.sw    0x84(r0),r31
114 #define EXCEPTION_T_LOAD_GPR31(reg)     l.lwz   reg,0x84(r0)
115
116 /* =========================================================[ macros ]=== */
117
118
119 #define GET_CURRENT_PGD(reg,t1)                                 \
120         LOAD_SYMBOL_2_GPR(reg,current_pgd)                      ;\
121         tophys  (t1,reg)                                        ;\
122         l.lwz   reg,0(t1)
123
124
125 /*
126  * DSCR: this is a common hook for handling exceptions. it will save
127  *       the needed registers, set up the stack and a pointer to current,
128  *       then jump to the handler while enabling the MMU
129  *
130  * PRMS: handler        - a function to jump to. it has to save the
131  *                      remaining registers to the kernel stack, call the
132  *                      appropriate arch-independent exception handler
133  *                      and finally jump to ret_from_except
134  *
135  * PREQ: unchanged state from the time exception happened
136  *
137  * POST: SAVED the following registers' original values
138  *             to the newly created exception frame pointed to by r1
139  *
140  *       r1  - ksp      pointing to the new (exception) frame
141  *       r4  - EEAR     exception EA
142  *       r10 - current  pointing to current_thread_info struct
143  *       r12 - syscall  0, since we didn't come from syscall
144  *       r30 - handler  address of the handler we'll jump to
145  *
146  *       handler has to save remaining registers to the exception
147  *       ksp frame *before* tainting them!
148  *
149  * NOTE: this function is not reentrant per se. reentrancy is guaranteed
150  *       by the processor disabling all exceptions/interrupts when an
151  *       exception occurs.
152  *
153  * OPTM: no need to make it so wasteful to extract ksp when in user mode
154  */
155
156 #define EXCEPTION_HANDLE(handler)                               \
157         EXCEPTION_T_STORE_GPR30                                 ;\
158         l.mfspr r30,r0,SPR_ESR_BASE                             ;\
159         l.andi  r30,r30,SPR_SR_SM                               ;\
160         l.sfeqi r30,0                                           ;\
161         EXCEPTION_T_STORE_GPR10                                 ;\
162         l.bnf   2f                            /* kernel_mode */ ;\
163          EXCEPTION_T_STORE_SP                 /* delay slot */  ;\
164 1: /* user_mode:   */                                           ;\
165         LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)           ;\
166         tophys  (r30,r1)                                        ;\
167         /* r10: current_thread_info  */                         ;\
168         l.lwz   r10,0(r30)                                      ;\
169         tophys  (r30,r10)                                       ;\
170         l.lwz   r1,(TI_KSP)(r30)                                ;\
171         /* fall through */                                      ;\
172 2: /* kernel_mode: */                                           ;\
173         /* create new stack frame, save only needed gprs */     ;\
174         /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */   ;\
175         /* r12: temp, syscall indicator */                      ;\
176         l.addi  r1,r1,-(INT_FRAME_SIZE)                         ;\
177         /* r1 is KSP, r30 is __pa(KSP) */                       ;\
178         tophys  (r30,r1)                                        ;\
179         l.sw    PT_GPR12(r30),r12                               ;\
180         /* r4 use for tmp before EA */                          ;\
181         l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
182         l.sw    PT_PC(r30),r12                                  ;\
183         l.mfspr r12,r0,SPR_ESR_BASE                             ;\
184         l.sw    PT_SR(r30),r12                                  ;\
185         /* save r30 */                                          ;\
186         EXCEPTION_T_LOAD_GPR30(r12)                             ;\
187         l.sw    PT_GPR30(r30),r12                               ;\
188         /* save r10 as was prior to exception */                ;\
189         EXCEPTION_T_LOAD_GPR10(r12)                             ;\
190         l.sw    PT_GPR10(r30),r12                               ;\
191         /* save PT_SP as was prior to exception */              ;\
192         EXCEPTION_T_LOAD_SP(r12)                                ;\
193         l.sw    PT_SP(r30),r12                                  ;\
194         /* save exception r4, set r4 = EA */                    ;\
195         l.sw    PT_GPR4(r30),r4                                 ;\
196         l.mfspr r4,r0,SPR_EEAR_BASE                             ;\
197         /* r12 == 1 if we come from syscall */                  ;\
198         CLEAR_GPR(r12)                                          ;\
199         /* ----- turn on MMU ----- */                           ;\
200         /* Carry DSX into exception SR */                       ;\
201         l.mfspr r30,r0,SPR_SR                                   ;\
202         l.andi  r30,r30,SPR_SR_DSX                              ;\
203         l.ori   r30,r30,(EXCEPTION_SR)                          ;\
204         l.mtspr r0,r30,SPR_ESR_BASE                             ;\
205         /* r30: EA address of handler */                        ;\
206         LOAD_SYMBOL_2_GPR(r30,handler)                          ;\
207         l.mtspr r0,r30,SPR_EPCR_BASE                            ;\
208         l.rfe
209
210 /*
211  * this doesn't work
212  *
213  *
214  * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
215  * #define UNHANDLED_EXCEPTION(handler)                         \
216  *      l.ori   r3,r0,0x1                                       ;\
217  *      l.mtspr r0,r3,SPR_SR                                    ;\
218  *      l.movhi r3,hi(0xf0000100)                               ;\
219  *      l.ori   r3,r3,lo(0xf0000100)                            ;\
220  *      l.jr    r3                                              ;\
221  *      l.nop   1
222  *
223  * #endif
224  */
225
226 /* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
227  *       a bit more careful (if we have a PT_SP or current pointer
228  *       corruption) and set them up from 'current_set'
229  *
230  */
231 #define UNHANDLED_EXCEPTION(handler)                            \
232         EXCEPTION_T_STORE_GPR31                                 ;\
233         EXCEPTION_T_STORE_GPR10                                 ;\
234         EXCEPTION_T_STORE_SP                                    ;\
235         /* temporary store r3, r9 into r1, r10 */               ;\
236         l.addi  r1,r3,0x0                                       ;\
237         l.addi  r10,r9,0x0                                      ;\
238         /* the string referenced by r3 must be low enough */    ;\
239         l.jal   _emergency_print                                ;\
240         l.ori   r3,r0,lo(_string_unhandled_exception)           ;\
241         l.mfspr r3,r0,SPR_NPC                                   ;\
242         l.jal   _emergency_print_nr                             ;\
243         l.andi  r3,r3,0x1f00                                    ;\
244         /* the string referenced by r3 must be low enough */    ;\
245         l.jal   _emergency_print                                ;\
246         l.ori   r3,r0,lo(_string_epc_prefix)                    ;\
247         l.jal   _emergency_print_nr                             ;\
248         l.mfspr r3,r0,SPR_EPCR_BASE                             ;\
249         l.jal   _emergency_print                                ;\
250         l.ori   r3,r0,lo(_string_nl)                            ;\
251         /* end of printing */                                   ;\
252         l.addi  r3,r1,0x0                                       ;\
253         l.addi  r9,r10,0x0                                      ;\
254         /* extract current, ksp from current_set */             ;\
255         LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)              ;\
256         LOAD_SYMBOL_2_GPR(r10,init_thread_union)                ;\
257         /* create new stack frame, save only needed gprs */     ;\
258         /* r1: KSP, r10: current, r31: __pa(KSP) */             ;\
259         /* r12: temp, syscall indicator, r13 temp */            ;\
260         l.addi  r1,r1,-(INT_FRAME_SIZE)                         ;\
261         /* r1 is KSP, r31 is __pa(KSP) */                       ;\
262         tophys  (r31,r1)                                        ;\
263         l.sw    PT_GPR12(r31),r12                                       ;\
264         l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
265         l.sw    PT_PC(r31),r12                                  ;\
266         l.mfspr r12,r0,SPR_ESR_BASE                             ;\
267         l.sw    PT_SR(r31),r12                                  ;\
268         /* save r31 */                                          ;\
269         EXCEPTION_T_LOAD_GPR31(r12)                             ;\
270         l.sw    PT_GPR31(r31),r12                                       ;\
271         /* save r10 as was prior to exception */                ;\
272         EXCEPTION_T_LOAD_GPR10(r12)                             ;\
273         l.sw    PT_GPR10(r31),r12                                       ;\
274         /* save PT_SP as was prior to exception */                      ;\
275         EXCEPTION_T_LOAD_SP(r12)                                ;\
276         l.sw    PT_SP(r31),r12                                  ;\
277         l.sw    PT_GPR13(r31),r13                                       ;\
278         /* --> */                                               ;\
279         /* save exception r4, set r4 = EA */                    ;\
280         l.sw    PT_GPR4(r31),r4                                 ;\
281         l.mfspr r4,r0,SPR_EEAR_BASE                             ;\
282         /* r12 == 1 if we come from syscall */                  ;\
283         CLEAR_GPR(r12)                                          ;\
284         /* ----- play a MMU trick ----- */                      ;\
285         l.ori   r31,r0,(EXCEPTION_SR)                           ;\
286         l.mtspr r0,r31,SPR_ESR_BASE                             ;\
287         /* r31: EA address of handler */                        ;\
288         LOAD_SYMBOL_2_GPR(r31,handler)                          ;\
289         l.mtspr r0,r31,SPR_EPCR_BASE                            ;\
290         l.rfe
291
292 /* =====================================================[ exceptions] === */
293
294 /* ---[ 0x100: RESET exception ]----------------------------------------- */
295     .org 0x100
296         /* Jump to .init code at _start which lives in the .head section
297          * and will be discarded after boot.
298          */
299         LOAD_SYMBOL_2_GPR(r15, _start)
300         tophys  (r13,r15)                       /* MMU disabled */
301         l.jr    r13
302          l.nop
303
304 /* ---[ 0x200: BUS exception ]------------------------------------------- */
305     .org 0x200
306 _dispatch_bus_fault:
307         EXCEPTION_HANDLE(_bus_fault_handler)
308
309 /* ---[ 0x300: Data Page Fault exception ]------------------------------- */
310     .org 0x300
311 _dispatch_do_dpage_fault:
312 //      totally disable timer interrupt
313 //      l.mtspr r0,r0,SPR_TTMR
314 //      DEBUG_TLB_PROBE(0x300)
315 //      EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
316         EXCEPTION_HANDLE(_data_page_fault_handler)
317
318 /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
319     .org 0x400
320 _dispatch_do_ipage_fault:
321 //      totally disable timer interrupt
322 //      l.mtspr r0,r0,SPR_TTMR
323 //      DEBUG_TLB_PROBE(0x400)
324 //      EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
325         EXCEPTION_HANDLE(_insn_page_fault_handler)
326
327 /* ---[ 0x500: Timer exception ]----------------------------------------- */
328     .org 0x500
329         EXCEPTION_HANDLE(_timer_handler)
330
331 /* ---[ 0x600: Alignment exception ]-------------------------------------- */
332     .org 0x600
333         EXCEPTION_HANDLE(_alignment_handler)
334
335 /* ---[ 0x700: Illegal insn exception ]---------------------------------- */
336     .org 0x700
337         EXCEPTION_HANDLE(_illegal_instruction_handler)
338
339 /* ---[ 0x800: External interrupt exception ]---------------------------- */
340     .org 0x800
341         EXCEPTION_HANDLE(_external_irq_handler)
342
343 /* ---[ 0x900: DTLB miss exception ]------------------------------------- */
344     .org 0x900
345         l.j     boot_dtlb_miss_handler
346         l.nop
347
348 /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
349     .org 0xa00
350         l.j     boot_itlb_miss_handler
351         l.nop
352
353 /* ---[ 0xb00: Range exception ]----------------------------------------- */
354     .org 0xb00
355         UNHANDLED_EXCEPTION(_vector_0xb00)
356
357 /* ---[ 0xc00: Syscall exception ]--------------------------------------- */
358     .org 0xc00
359         EXCEPTION_HANDLE(_sys_call_handler)
360
361 /* ---[ 0xd00: Floating point exception ]-------------------------------- */
362     .org 0xd00
363         UNHANDLED_EXCEPTION(_vector_0xd00)
364
365 /* ---[ 0xe00: Trap exception ]------------------------------------------ */
366     .org 0xe00
367 //      UNHANDLED_EXCEPTION(_vector_0xe00)
368         EXCEPTION_HANDLE(_trap_handler)
369
370 /* ---[ 0xf00: Reserved exception ]-------------------------------------- */
371     .org 0xf00
372         UNHANDLED_EXCEPTION(_vector_0xf00)
373
374 /* ---[ 0x1000: Reserved exception ]------------------------------------- */
375     .org 0x1000
376         UNHANDLED_EXCEPTION(_vector_0x1000)
377
378 /* ---[ 0x1100: Reserved exception ]------------------------------------- */
379     .org 0x1100
380         UNHANDLED_EXCEPTION(_vector_0x1100)
381
382 /* ---[ 0x1200: Reserved exception ]------------------------------------- */
383     .org 0x1200
384         UNHANDLED_EXCEPTION(_vector_0x1200)
385
386 /* ---[ 0x1300: Reserved exception ]------------------------------------- */
387     .org 0x1300
388         UNHANDLED_EXCEPTION(_vector_0x1300)
389
390 /* ---[ 0x1400: Reserved exception ]------------------------------------- */
391     .org 0x1400
392         UNHANDLED_EXCEPTION(_vector_0x1400)
393
394 /* ---[ 0x1500: Reserved exception ]------------------------------------- */
395     .org 0x1500
396         UNHANDLED_EXCEPTION(_vector_0x1500)
397
398 /* ---[ 0x1600: Reserved exception ]------------------------------------- */
399     .org 0x1600
400         UNHANDLED_EXCEPTION(_vector_0x1600)
401
402 /* ---[ 0x1700: Reserved exception ]------------------------------------- */
403     .org 0x1700
404         UNHANDLED_EXCEPTION(_vector_0x1700)
405
406 /* ---[ 0x1800: Reserved exception ]------------------------------------- */
407     .org 0x1800
408         UNHANDLED_EXCEPTION(_vector_0x1800)
409
410 /* ---[ 0x1900: Reserved exception ]------------------------------------- */
411     .org 0x1900
412         UNHANDLED_EXCEPTION(_vector_0x1900)
413
414 /* ---[ 0x1a00: Reserved exception ]------------------------------------- */
415     .org 0x1a00
416         UNHANDLED_EXCEPTION(_vector_0x1a00)
417
418 /* ---[ 0x1b00: Reserved exception ]------------------------------------- */
419     .org 0x1b00
420         UNHANDLED_EXCEPTION(_vector_0x1b00)
421
422 /* ---[ 0x1c00: Reserved exception ]------------------------------------- */
423     .org 0x1c00
424         UNHANDLED_EXCEPTION(_vector_0x1c00)
425
426 /* ---[ 0x1d00: Reserved exception ]------------------------------------- */
427     .org 0x1d00
428         UNHANDLED_EXCEPTION(_vector_0x1d00)
429
430 /* ---[ 0x1e00: Reserved exception ]------------------------------------- */
431     .org 0x1e00
432         UNHANDLED_EXCEPTION(_vector_0x1e00)
433
434 /* ---[ 0x1f00: Reserved exception ]------------------------------------- */
435     .org 0x1f00
436         UNHANDLED_EXCEPTION(_vector_0x1f00)
437
438     .org 0x2000
439 /* ===================================================[ kernel start ]=== */
440
441 /*    .text*/
442
443 /* This early stuff belongs in HEAD, but some of the functions below definitely
444  * don't... */
445
446         __HEAD
447         .global _start
448 _start:
449         /* Init r0 to zero as per spec */
450         CLEAR_GPR(r0)
451
452         /* save kernel parameters */
453         l.or    r25,r0,r3       /* pointer to fdt */
454
455         /*
456          * ensure a deterministic start
457          */
458
459         l.ori   r3,r0,0x1
460         l.mtspr r0,r3,SPR_SR
461
462         CLEAR_GPR(r1)
463         CLEAR_GPR(r2)
464         CLEAR_GPR(r3)
465         CLEAR_GPR(r4)
466         CLEAR_GPR(r5)
467         CLEAR_GPR(r6)
468         CLEAR_GPR(r7)
469         CLEAR_GPR(r8)
470         CLEAR_GPR(r9)
471         CLEAR_GPR(r10)
472         CLEAR_GPR(r11)
473         CLEAR_GPR(r12)
474         CLEAR_GPR(r13)
475         CLEAR_GPR(r14)
476         CLEAR_GPR(r15)
477         CLEAR_GPR(r16)
478         CLEAR_GPR(r17)
479         CLEAR_GPR(r18)
480         CLEAR_GPR(r19)
481         CLEAR_GPR(r20)
482         CLEAR_GPR(r21)
483         CLEAR_GPR(r22)
484         CLEAR_GPR(r23)
485         CLEAR_GPR(r24)
486         CLEAR_GPR(r26)
487         CLEAR_GPR(r27)
488         CLEAR_GPR(r28)
489         CLEAR_GPR(r29)
490         CLEAR_GPR(r30)
491         CLEAR_GPR(r31)
492
493         /*
494          * set up initial ksp and current
495          */
496         /* setup kernel stack */
497         LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE)
498         LOAD_SYMBOL_2_GPR(r10,init_thread_union)        // setup current
499         tophys  (r31,r10)
500         l.sw    TI_KSP(r31), r1
501
502         l.ori   r4,r0,0x0
503
504
505         /*
506          * .data contains initialized data,
507          * .bss contains uninitialized data - clear it up
508          */
509 clear_bss:
510         LOAD_SYMBOL_2_GPR(r24, __bss_start)
511         LOAD_SYMBOL_2_GPR(r26, _end)
512         tophys(r28,r24)
513         tophys(r30,r26)
514         CLEAR_GPR(r24)
515         CLEAR_GPR(r26)
516 1:
517         l.sw    (0)(r28),r0
518         l.sfltu r28,r30
519         l.bf    1b
520         l.addi  r28,r28,4
521
522 enable_ic:
523         l.jal   _ic_enable
524          l.nop
525
526 enable_dc:
527         l.jal   _dc_enable
528          l.nop
529
530 flush_tlb:
531         l.jal   _flush_tlb
532          l.nop
533
534 /* The MMU needs to be enabled before or32_early_setup is called */
535
536 enable_mmu:
537         /*
538          * enable dmmu & immu by setting SR[DME] (bit 5) and
539          * SR[IME] (bit 6) to 1
540          */
541         l.mfspr r30,r0,SPR_SR
542         l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
543         l.ori   r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
544         l.or    r30,r30,r28
545         l.mtspr r0,r30,SPR_SR
546         l.nop
547         l.nop
548         l.nop
549         l.nop
550         l.nop
551         l.nop
552         l.nop
553         l.nop
554         l.nop
555         l.nop
556         l.nop
557         l.nop
558         l.nop
559         l.nop
560         l.nop
561         l.nop
562
563         // reset the simulation counters
564         l.nop 5
565
566         /* check fdt header magic word */
567         l.lwz   r3,0(r25)       /* load magic from fdt into r3 */
568         l.movhi r4,hi(OF_DT_HEADER)
569         l.ori   r4,r4,lo(OF_DT_HEADER)
570         l.sfeq  r3,r4
571         l.bf    _fdt_found
572          l.nop
573         /* magic number mismatch, set fdt pointer to null */
574         l.or    r25,r0,r0
575 _fdt_found:
576         /* pass fdt pointer to or32_early_setup in r3 */
577         l.or    r3,r0,r25
578         LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
579         l.jalr r24
580          l.nop
581
582 clear_regs:
583         /*
584          * clear all GPRS to increase determinism
585          */
586         CLEAR_GPR(r2)
587         CLEAR_GPR(r3)
588         CLEAR_GPR(r4)
589         CLEAR_GPR(r5)
590         CLEAR_GPR(r6)
591         CLEAR_GPR(r7)
592         CLEAR_GPR(r8)
593         CLEAR_GPR(r9)
594         CLEAR_GPR(r11)
595         CLEAR_GPR(r12)
596         CLEAR_GPR(r13)
597         CLEAR_GPR(r14)
598         CLEAR_GPR(r15)
599         CLEAR_GPR(r16)
600         CLEAR_GPR(r17)
601         CLEAR_GPR(r18)
602         CLEAR_GPR(r19)
603         CLEAR_GPR(r20)
604         CLEAR_GPR(r21)
605         CLEAR_GPR(r22)
606         CLEAR_GPR(r23)
607         CLEAR_GPR(r24)
608         CLEAR_GPR(r25)
609         CLEAR_GPR(r26)
610         CLEAR_GPR(r27)
611         CLEAR_GPR(r28)
612         CLEAR_GPR(r29)
613         CLEAR_GPR(r30)
614         CLEAR_GPR(r31)
615
616 jump_start_kernel:
617         /*
618          * jump to kernel entry (start_kernel)
619          */
620         LOAD_SYMBOL_2_GPR(r30, start_kernel)
621         l.jr    r30
622          l.nop
623
624 _flush_tlb:
625         /*
626          *  I N V A L I D A T E   T L B   e n t r i e s
627          */
628         LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
629         LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
630         l.addi  r7,r0,128 /* Maximum number of sets */
631 1:
632         l.mtspr r5,r0,0x0
633         l.mtspr r6,r0,0x0
634
635         l.addi  r5,r5,1
636         l.addi  r6,r6,1
637         l.sfeq  r7,r0
638         l.bnf   1b
639          l.addi r7,r7,-1
640
641         l.jr    r9
642          l.nop
643
644 /* ========================================[ cache ]=== */
645
646         /* alignment here so we don't change memory offsets with
647          * memory controller defined
648          */
649         .align 0x2000
650
651 _ic_enable:
652         /* Check if IC present and skip enabling otherwise */
653         l.mfspr r24,r0,SPR_UPR
654         l.andi  r26,r24,SPR_UPR_ICP
655         l.sfeq  r26,r0
656         l.bf    9f
657         l.nop
658
659         /* Disable IC */
660         l.mfspr r6,r0,SPR_SR
661         l.addi  r5,r0,-1
662         l.xori  r5,r5,SPR_SR_ICE
663         l.and   r5,r6,r5
664         l.mtspr r0,r5,SPR_SR
665
666         /* Establish cache block size
667            If BS=0, 16;
668            If BS=1, 32;
669            r14 contains the block size
670         */
671         l.mfspr r24,r0,SPR_ICCFGR
672         l.andi  r26,r24,SPR_ICCFGR_CBS
673         l.srli  r28,r26,7
674         l.ori   r30,r0,16
675         l.sll   r14,r30,r28
676
677         /* Establish number of cache sets
678            r16 contains number of cache sets
679            r28 contains log(# of cache sets)
680         */
681         l.andi  r26,r24,SPR_ICCFGR_NCS
682         l.srli  r28,r26,3
683         l.ori   r30,r0,1
684         l.sll   r16,r30,r28
685
686         /* Invalidate IC */
687         l.addi  r6,r0,0
688         l.sll   r5,r14,r28
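        // r5 = block size << log2(nsets), i.e. the number of bytes to
        // walk in the loop below (e.g. 16-byte blocks * 512 sets = 8KB)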
689 //        l.mul   r5,r14,r16
690 //      l.trap  1
691 //      l.addi  r5,r0,IC_SIZE
692 1:
693         l.mtspr r0,r6,SPR_ICBIR
694         l.sfne  r6,r5
695         l.bf    1b
696         l.add   r6,r6,r14
697  //       l.addi   r6,r6,IC_LINE
698
699         /* Enable IC */
700         l.mfspr r6,r0,SPR_SR
701         l.ori   r6,r6,SPR_SR_ICE
702         l.mtspr r0,r6,SPR_SR
703         l.nop
704         l.nop
705         l.nop
706         l.nop
707         l.nop
708         l.nop
709         l.nop
710         l.nop
711         l.nop
712         l.nop
713 9:
714         l.jr    r9
715         l.nop
716
717 _dc_enable:
718         /* Check if DC present and skip enabling otherwise */
719         l.mfspr r24,r0,SPR_UPR
720         l.andi  r26,r24,SPR_UPR_DCP
721         l.sfeq  r26,r0
722         l.bf    9f
723         l.nop
724
725         /* Disable DC */
726         l.mfspr r6,r0,SPR_SR
727         l.addi  r5,r0,-1
728         l.xori  r5,r5,SPR_SR_DCE
729         l.and   r5,r6,r5
730         l.mtspr r0,r5,SPR_SR
731
732         /* Establish cache block size
733            If BS=0, 16;
734            If BS=1, 32;
735            r14 contains the block size
736         */
737         l.mfspr r24,r0,SPR_DCCFGR
738         l.andi  r26,r24,SPR_DCCFGR_CBS
739         l.srli  r28,r26,7
740         l.ori   r30,r0,16
741         l.sll   r14,r30,r28
742
743         /* Establish number of cache sets
744            r16 contains number of cache sets
745            r28 contains log(# of cache sets)
746         */
747         l.andi  r26,r24,SPR_DCCFGR_NCS
748         l.srli  r28,r26,3
749         l.ori   r30,r0,1
750         l.sll   r16,r30,r28
751
752         /* Invalidate DC */
753         l.addi  r6,r0,0
754         l.sll   r5,r14,r28
755 1:
756         l.mtspr r0,r6,SPR_DCBIR
757         l.sfne  r6,r5
758         l.bf    1b
759         l.add   r6,r6,r14
760
761         /* Enable DC */
762         l.mfspr r6,r0,SPR_SR
763         l.ori   r6,r6,SPR_SR_DCE
764         l.mtspr r0,r6,SPR_SR
765 9:
766         l.jr    r9
767         l.nop
768
769 /* ===============================================[ page table masks ]=== */
770
771 #define DTLB_UP_CONVERT_MASK  0x3fa
772 #define ITLB_UP_CONVERT_MASK  0x3a
773
774 /* for SMP we'd have (this is a bit subtle, CC must be always set
775  * for SMP, but since we have _PAGE_PRESENT bit always defined
776  * we can just modify the mask)
777  */
778 #define DTLB_SMP_CONVERT_MASK  0x3fb
779 #define ITLB_SMP_CONVERT_MASK  0x3b
780
781 /* ---[ boot dtlb miss handler ]----------------------------------------- */
782
783 boot_dtlb_miss_handler:
784
785 /* mask for DTLB_MR register: - (0) sets V (valid) bit,
786  *                            - (31-12) sets bits belonging to VPN (31-12)
787  */
788 #define DTLB_MR_MASK 0xfffff001
789
790 /* mask for DTLB_TR register: - (1) sets CI (cache inhibit) bit,
791  *                            - (4) sets A (access) bit,
792  *                            - (5) sets D (dirty) bit,
793  *                            - (8) sets SRE (superuser read) bit
794  *                            - (9) sets SWE (superuser write) bit
795  *                            - (31-12) sets bits belonging to VPN (31-12)
796  */
797 #define DTLB_TR_MASK 0xfffff332
798
799 /* These are for masking out the VPN/PPN value from the MR/TR registers...
800  * it's not the same as the PFN */
801 #define VPN_MASK 0xfffff000
802 #define PPN_MASK 0xfffff000
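/*
 * Worked example for the code below, with EA == 0xc0012345:
 *   DTLBMR = (EA | 0xfff) & DTLB_MR_MASK = 0xc0012001     (VPN | V)
 *   DTLBTR = (PA | 0xfff) & DTLB_TR_MASK = PPN | 0x332    (CI,A,D,SRE,SWE)
 * where PA is EA itself for EA <= 0xbfffffff and tophys(EA) otherwise
 * (with the usual 0xc0000000 KERNELBASE that gives 0x00012332 here).
 */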
803
804
805         EXCEPTION_STORE_GPR6
806
807 #if 0
808         l.mfspr r6,r0,SPR_ESR_BASE         //
809         l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
810         l.sfeqi r6,0                       // r6 == 0x1 --> SM
811         l.bf    exit_with_no_dtranslation  //
812         l.nop
813 #endif
814
815         /* this could be optimized by moving the storing of
816          * non-r6 registers here, and jumping to the r6 restore
817          * if not in supervisor mode
818          */
819
820         EXCEPTION_STORE_GPR2
821         EXCEPTION_STORE_GPR3
822         EXCEPTION_STORE_GPR4
823         EXCEPTION_STORE_GPR5
824
825         l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
826
827 immediate_translation:
828         CLEAR_GPR(r6)
829
830         l.srli  r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))
831
832         l.mfspr r6, r0, SPR_DMMUCFGR
833         l.andi  r6, r6, SPR_DMMUCFGR_NTS
834         l.srli  r6, r6, SPR_DMMUCFGR_NTS_OFF
835         l.ori   r5, r0, 0x1
836         l.sll   r5, r5, r6      // r5 = number DMMU sets
837         l.addi  r6, r5, -1      // r6 = nsets mask
838         l.and   r2, r3, r6      // r2 <- r3 % NSETS_MASK
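        // e.g. with NTS == 6 there are 64 sets, r6 == 0x3f and r2 is
        // the DTLB set selected by EA bits 18:13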
839
840         l.or    r6,r6,r4                   // r6 <- r4
841         l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
842         l.movhi r5,hi(DTLB_MR_MASK)        // r5 <- ffff:0000.x000
843         l.ori   r5,r5,lo(DTLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
844         l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have DTLBMR entry
845         l.mtspr r2,r5,SPR_DTLBMR_BASE(0)   // set DTLBMR
846
847         /* set up DTLB with no translation for EA <= 0xbfffffff */
848         LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
849         l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0xbfffffff >= EA)
850         l.bf     1f                        // goto out
851         l.and    r3,r4,r4                  // delay slot :: r3 <- r4 (if flag==1)
852
853         tophys(r3,r4)                      // r3 <- PA
854 1:
855         l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
856         l.movhi r5,hi(DTLB_TR_MASK)        // r5 <- ffff:0000.x000
857         l.ori   r5,r5,lo(DTLB_TR_MASK)     // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK
858         l.and   r5,r5,r3                   // r5 <- PPN :PPN .x330 - we have DTLBTR entry
859         l.mtspr r2,r5,SPR_DTLBTR_BASE(0)   // set DTLBTR
860
861         EXCEPTION_LOAD_GPR6
862         EXCEPTION_LOAD_GPR5
863         EXCEPTION_LOAD_GPR4
864         EXCEPTION_LOAD_GPR3
865         EXCEPTION_LOAD_GPR2
866
867         l.rfe                              // SR <- ESR, PC <- EPC
868
869 exit_with_no_dtranslation:
870         /* EA out of memory or not in supervisor mode */
871         EXCEPTION_LOAD_GPR6
872         EXCEPTION_LOAD_GPR4
873         l.j     _dispatch_bus_fault
874
875 /* ---[ boot itlb miss handler ]----------------------------------------- */
876
877 boot_itlb_miss_handler:
878
879 /* mask for ITLB_MR register: - sets V (valid) bit,
880  *                            - sets bits belonging to VPN (31-12)
881  */
882 #define ITLB_MR_MASK 0xfffff001
883
884 /* mask for ITLB_TR register: - sets A (access) bit,
885  *                            - sets SXE (superuser execute) bit
886  *                            - sets bits belonging to VPN (31-12)
887  */
888 #define ITLB_TR_MASK 0xfffff050
889
890 /*
891 #define VPN_MASK 0xffffe000
892 #define PPN_MASK 0xffffe000
893 */
894
895
896
897         EXCEPTION_STORE_GPR2
898         EXCEPTION_STORE_GPR3
899         EXCEPTION_STORE_GPR4
900         EXCEPTION_STORE_GPR5
901         EXCEPTION_STORE_GPR6
902
903 #if 0
904         l.mfspr r6,r0,SPR_ESR_BASE         //
905         l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
906         l.sfeqi r6,0                       // r6 == 0x1 --> SM
907         l.bf    exit_with_no_itranslation
908         l.nop
909 #endif
910
911
912         l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
913
914 earlyearly:
915         CLEAR_GPR(r6)
916
917         l.srli  r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))
918
919         l.mfspr r6, r0, SPR_IMMUCFGR
920         l.andi  r6, r6, SPR_IMMUCFGR_NTS
921         l.srli  r6, r6, SPR_IMMUCFGR_NTS_OFF
922         l.ori   r5, r0, 0x1
923         l.sll   r5, r5, r6      // r5 = number IMMU sets from IMMUCFGR
924         l.addi  r6, r5, -1      // r6 = nsets mask
925         l.and   r2, r3, r6      // r2 <- r3 % NSETS_MASK
926
927         l.or    r6,r6,r4                   // r6 <- r4
928         l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
929         l.movhi r5,hi(ITLB_MR_MASK)        // r5 <- ffff:0000.x000
930         l.ori   r5,r5,lo(ITLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
931         l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have ITLBMR entry
932         l.mtspr r2,r5,SPR_ITLBMR_BASE(0)   // set ITLBMR
933
934         /*
935          * set up ITLB with no translation for EA <= 0x0fffffff
936          *
937          * we need this for head.S mapping (EA = PA). if we move all functions
938          * which run with mmu enabled into entry.S, we might be able to eliminate this.
939          *
940          */
941         LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
942         l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0x0fffffff >= EA)
943         l.bf     1f                        // goto out
944         l.and    r3,r4,r4                  // delay slot :: r3 <- r4 (if flag==1)
945
946         tophys(r3,r4)                      // r3 <- PA
947 1:
948         l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
949         l.movhi r5,hi(ITLB_TR_MASK)        // r5 <- ffff:0000.x000
950         l.ori   r5,r5,lo(ITLB_TR_MASK)     // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK
951         l.and   r5,r5,r3                   // r5 <- PPN :PPN .x050 - we have ITLBTR entry
952         l.mtspr r2,r5,SPR_ITLBTR_BASE(0)   // set ITLBTR
953
954         EXCEPTION_LOAD_GPR6
955         EXCEPTION_LOAD_GPR5
956         EXCEPTION_LOAD_GPR4
957         EXCEPTION_LOAD_GPR3
958         EXCEPTION_LOAD_GPR2
959
960         l.rfe                              // SR <- ESR, PC <- EPC
961
962 exit_with_no_itranslation:
963         EXCEPTION_LOAD_GPR4
964         EXCEPTION_LOAD_GPR6
965         l.j    _dispatch_bus_fault
966         l.nop
967
968 /* ====================================================================== */
969 /*
970  * Stuff below here shouldn't go into .head section... maybe this stuff
971  * can be moved to entry.S ???
972  */
973
974 /* ==============================================[ DTLB miss handler ]=== */
975
976 /*
977  * Comments:
978  *   Exception handlers are entered with MMU off so the following handler
979  *   needs to use physical addressing
980  *
981  */
982
983         .text
984 ENTRY(dtlb_miss_handler)
985         EXCEPTION_STORE_GPR2
986         EXCEPTION_STORE_GPR3
987         EXCEPTION_STORE_GPR4
988         /*
989          * get EA of the miss
990          */
991         l.mfspr r2,r0,SPR_EEAR_BASE
992         /*
993          * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
994          */
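        /*
         * With 8KB pages (PAGE_SHIFT == 13) each pgd entry maps
         * 1 << 24 == 16MB, so the pgd index is EA >> 24, scaled by 4
         * (one 32-bit entry per slot).  The pte index used further
         * down is (EA >> 13) & 0x7ff, i.e. 2048 pte entries per page.
         */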
995         GET_CURRENT_PGD(r3,r4)          // r3 is current_pgd, r4 is temp
996         l.srli  r4,r2,0x18              // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
997         l.slli  r4,r4,0x2               // to get address << 2
998         l.add   r3,r4,r3                // r4 is pgd_index(daddr)
999         /*
1000          * if (pmd_none(*pmd))
1001          *   goto pmd_none:
1002          */
1003         tophys  (r4,r3)
1004         l.lwz   r3,0x0(r4)              // get *pmd value
1005         l.sfne  r3,r0
1006         l.bnf   d_pmd_none
1007          l.addi r3,r0,0xffffe000        // PAGE_MASK
1008
1009 d_pmd_good:
1010         /*
1011          * pte = *pte_offset(pmd, daddr);
1012          */
1013         l.lwz   r4,0x0(r4)              // get **pmd value
1014         l.and   r4,r4,r3                // & PAGE_MASK
1015         l.srli  r2,r2,0xd               // >> PAGE_SHIFT, r2 == EEAR
1016         l.andi  r3,r2,0x7ff             // (1UL << PAGE_SHIFT - 2) - 1
1017         l.slli  r3,r3,0x2               // to get address << 2
1018         l.add   r3,r3,r4
1019         l.lwz   r3,0x0(r3)              // this is pte at last
1020         /*
1021          * if (!pte_present(pte))
1022          */
1023         l.andi  r4,r3,0x1
1024         l.sfne  r4,r0                   // is pte present
1025         l.bnf   d_pte_not_present
1026         l.addi  r4,r0,0xffffe3fa        // PAGE_MASK | DTLB_UP_CONVERT_MASK
1027         /*
1028          * fill DTLB TR register
1029          */
1030         l.and   r4,r3,r4                // apply the mask
1031         // Determine number of DMMU sets
1032         l.mfspr r2, r0, SPR_DMMUCFGR
1033         l.andi  r2, r2, SPR_DMMUCFGR_NTS
1034         l.srli  r2, r2, SPR_DMMUCFGR_NTS_OFF
1035         l.ori   r3, r0, 0x1
1036         l.sll   r3, r3, r2      // r3 = number DMMU sets DMMUCFGR
1037         l.addi  r2, r3, -1      // r2 = nsets mask
1038         l.mfspr r3, r0, SPR_EEAR_BASE
1039         l.srli  r3, r3, 0xd     // >> PAGE_SHIFT
1040         l.and   r2, r3, r2      // calc offset:  & (NUM_TLB_ENTRIES-1)
1041                                                            //NUM_TLB_ENTRIES
1042         l.mtspr r2,r4,SPR_DTLBTR_BASE(0)
1043         /*
1044          * fill DTLB MR register
1045          */
1046         l.slli  r3, r3, 0xd             /* << PAGE_SHIFT => EA & PAGE_MASK */
1047         l.ori   r4,r3,0x1               // set hardware valid bit: DTLB_MR entry
1048         l.mtspr r2,r4,SPR_DTLBMR_BASE(0)
1049
1050         EXCEPTION_LOAD_GPR2
1051         EXCEPTION_LOAD_GPR3
1052         EXCEPTION_LOAD_GPR4
1053         l.rfe
1054 d_pmd_none:
1055 d_pte_not_present:
1056         EXCEPTION_LOAD_GPR2
1057         EXCEPTION_LOAD_GPR3
1058         EXCEPTION_LOAD_GPR4
1059         EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
1060
1061 /* ==============================================[ ITLB miss handler ]=== */
1062 ENTRY(itlb_miss_handler)
1063         EXCEPTION_STORE_GPR2
1064         EXCEPTION_STORE_GPR3
1065         EXCEPTION_STORE_GPR4
1066         /*
1067          * get EA of the miss
1068          */
1069         l.mfspr r2,r0,SPR_EEAR_BASE
1070
1071         /*
1072          * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
1073          *
1074          */
1075         GET_CURRENT_PGD(r3,r4)          // r3 is current_pgd, r4 is temp
1076         l.srli  r4,r2,0x18              // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
1077         l.slli  r4,r4,0x2               // to get address << 2
1078         l.add   r3,r4,r3                // r4 is pgd_index(daddr)
1079         /*
1080          * if (pmd_none(*pmd))
1081          *   goto pmd_none:
1082          */
1083         tophys  (r4,r3)
1084         l.lwz   r3,0x0(r4)              // get *pmd value
1085         l.sfne  r3,r0
1086         l.bnf   i_pmd_none
1087          l.addi r3,r0,0xffffe000        // PAGE_MASK
1088
1089 i_pmd_good:
1090         /*
1091          * pte = *pte_offset(pmd, iaddr);
1092          *
1093          */
1094         l.lwz   r4,0x0(r4)              // get **pmd value
1095         l.and   r4,r4,r3                // & PAGE_MASK
1096         l.srli  r2,r2,0xd               // >> PAGE_SHIFT, r2 == EEAR
1097         l.andi  r3,r2,0x7ff             // (1UL << PAGE_SHIFT - 2) - 1
1098         l.slli  r3,r3,0x2               // to get address << 2
1099         l.add   r3,r3,r4
1100         l.lwz   r3,0x0(r3)              // this is pte at last
1101         /*
1102          * if (!pte_present(pte))
1103          *
1104          */
1105         l.andi  r4,r3,0x1
1106         l.sfne  r4,r0                   // is pte present
1107         l.bnf   i_pte_not_present
1108          l.addi r4,r0,0xffffe03a        // PAGE_MASK | ITLB_UP_CONVERT_MASK
1109         /*
1110          * fill ITLB TR register
1111          */
1112         l.and   r4,r3,r4                // apply the mask
1113         l.andi  r3,r3,0x7c0             // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE |  _PAGE_URE | _PAGE_UWE
1114         l.sfeq  r3,r0
1115         l.bf    itlb_tr_fill //_workaround
1116         // Determine number of IMMU sets
1117         l.mfspr r2, r0, SPR_IMMUCFGR
1118         l.andi  r2, r2, SPR_IMMUCFGR_NTS
1119         l.srli  r2, r2, SPR_IMMUCFGR_NTS_OFF
1120         l.ori   r3, r0, 0x1
1121         l.sll   r3, r3, r2      // r3 = number IMMU sets IMMUCFGR
1122         l.addi  r2, r3, -1      // r2 = nsets mask
1123         l.mfspr r3, r0, SPR_EEAR_BASE
1124         l.srli  r3, r3, 0xd     // >> PAGE_SHIFT
1125         l.and   r2, r3, r2      // calc offset:  & (NUM_TLB_ENTRIES-1)
1126
1127 /*
1128  * __PHX__ :: fixme
1129  * we should not just blindly set executable flags,
1130  * but it does help with ping. the clean way would be to find out
1131  * (and fix it) why the stack doesn't have execution permissions
1132  */
1133
1134 itlb_tr_fill_workaround:
1135         l.ori   r4,r4,0xc0              // | (SPR_ITLBTR_UXE | ITLBTR_SXE)
1136 itlb_tr_fill:
1137         l.mtspr r2,r4,SPR_ITLBTR_BASE(0)
1138         /*
1139          * fill ITLB MR register
1140          */
1141         l.slli  r3, r3, 0xd             /* << PAGE_SHIFT => EA & PAGE_MASK */
1142         l.ori   r4,r3,0x1               // set hardware valid bit: ITLB_MR entry
1143         l.mtspr r2,r4,SPR_ITLBMR_BASE(0)
1144
1145         EXCEPTION_LOAD_GPR2
1146         EXCEPTION_LOAD_GPR3
1147         EXCEPTION_LOAD_GPR4
1148         l.rfe
1149
1150 i_pmd_none:
1151 i_pte_not_present:
1152         EXCEPTION_LOAD_GPR2
1153         EXCEPTION_LOAD_GPR3
1154         EXCEPTION_LOAD_GPR4
1155         EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
1156
1157 /* ==============================================[ boot tlb handlers ]=== */
1158
1159
1160 /* =================================================[ debugging aids ]=== */
1161
1162         .align 64
1163 _immu_trampoline:
1164         .space 64
1165 _immu_trampoline_top:
1166
1167 #define TRAMP_SLOT_0            (0x0)
1168 #define TRAMP_SLOT_1            (0x4)
1169 #define TRAMP_SLOT_2            (0x8)
1170 #define TRAMP_SLOT_3            (0xc)
1171 #define TRAMP_SLOT_4            (0x10)
1172 #define TRAMP_SLOT_5            (0x14)
1173 #define TRAMP_FRAME_SIZE        (0x18)
1174
1175 ENTRY(_immu_trampoline_workaround)
1176         // r2 EEA
1177         // r6 is physical EEA
1178         tophys(r6,r2)
1179
1180         LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
1181         tophys  (r3,r5)                 // r3 is trampoline (physical)
1182
1183         LOAD_SYMBOL_2_GPR(r4,0x15000000)
1184         l.sw    TRAMP_SLOT_0(r3),r4
1185         l.sw    TRAMP_SLOT_1(r3),r4
1186         l.sw    TRAMP_SLOT_4(r3),r4
1187         l.sw    TRAMP_SLOT_5(r3),r4
1188
1189                                         // EPC = EEA - 0x4
1190         l.lwz   r4,0x0(r6)              // load op @ EEA + 0x0 (fc address)
1191         l.sw    TRAMP_SLOT_3(r3),r4     // store it to _immu_trampoline_data
1192         l.lwz   r4,-0x4(r6)             // load op @ EEA - 0x4 (f8 address)
1193         l.sw    TRAMP_SLOT_2(r3),r4     // store it to _immu_trampoline_data
1194
1195         l.srli  r5,r4,26                // extract the 6-bit opcode
1196         l.sfeqi r5,0                    // l.j
1197         l.bf    0f
1198         l.sfeqi r5,0x11                 // l.jr
1199         l.bf    1f
1200         l.sfeqi r5,1                    // l.jal
1201         l.bf    2f
1202         l.sfeqi r5,0x12                 // l.jalr
1203         l.bf    3f
1204         l.sfeqi r5,3                    // l.bnf
1205         l.bf    4f
1206         l.sfeqi r5,4                    // l.bf
1207         l.bf    5f
1208 99:
1209         l.nop
1210         l.j     99b                     // should never happen
1211         l.nop   1
1212
1213         // r2 is EEA
1214         // r3 is trampoline address (physical)
1215         // r4 is instruction
1216         // r6 is physical(EEA)
1217         //
1218         // r5
1219
1220 2:      // l.jal
1221
1222         /* 19 20 aa aa  l.movhi r9,0xaaaa
1223          * a9 29 bb bb  l.ori   r9,0xbbbb
1224          *
1225          * where 0xaaaabbbb is EEA + 0x4 shifted right 2
1226          */
1227
1228         l.addi  r6,r2,0x4               // this is 0xaaaabbbb
1229
1230                                         // l.movhi r9,0xaaaa
1231         l.ori   r5,r0,0x1920            // 0x1920 == l.movhi r9
1232         l.sh    (TRAMP_SLOT_0+0x0)(r3),r5
1233         l.srli  r5,r6,16
1234         l.sh    (TRAMP_SLOT_0+0x2)(r3),r5
1235
1236                                         // l.ori   r9,0xbbbb
1237         l.ori   r5,r0,0xa929            // 0xa929 == l.ori r9
1238         l.sh    (TRAMP_SLOT_1+0x0)(r3),r5
1239         l.andi  r5,r6,0xffff
1240         l.sh    (TRAMP_SLOT_1+0x2)(r3),r5
1241
1242         /* fallthrough, need to set up new jump offset */
1243
1244
1245 0:      // l.j
1246         l.slli  r6,r4,6                 // original offset shifted left 6 - 2
1247 //      l.srli  r6,r6,6                 // original offset shifted right 2
1248
1249         l.slli  r4,r2,4                 // old jump position: EEA shifted left 4
1250 //      l.srli  r4,r4,6                 // old jump position: shifted right 2
1251
1252         l.addi  r5,r3,0xc               // new jump position (physical)
1253         l.slli  r5,r5,4                 // new jump position: shifted left 4
1254
1255         // calculate new jump offset
1256         // new_off = old_off + (old_jump - new_jump)
1257
1258         l.sub   r5,r4,r5                // old_jump - new_jump
1259         l.add   r5,r6,r5                // orig_off + (old_jump - new_jump)
1260         l.srli  r5,r5,6                 // new offset shifted right 2
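        // everything above is scaled by 16: byte addresses are <<4 and
        // the word-count immediate is <<6 (words are 4 bytes, times the
        // same 16), so the logical >>6 leaves the new 26-bit word offset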
1261
1262         // r5 is new jump offset
1263                                         // l.j has opcode 0x0...
1264         l.sw    TRAMP_SLOT_2(r3),r5     // write it back
1265
1266         l.j     trampoline_out
1267         l.nop
1268
1269 /* ----------------------------- */
1270
1271 3:      // l.jalr
1272
1273         /* 19 20 aa aa  l.movhi r9,0xaaaa
1274          * a9 29 bb bb  l.ori   r9,0xbbbb
1275          *
1276          * where 0xaaaabbbb is EEA + 0x4 shifted right 2
1277          */
1278
1279         l.addi  r6,r2,0x4               // this is 0xaaaabbbb
1280
1281                                         // l.movhi r9,0xaaaa
1282         l.ori   r5,r0,0x1920            // 0x1920 == l.movhi r9
1283         l.sh    (TRAMP_SLOT_0+0x0)(r3),r5
1284         l.srli  r5,r6,16
1285         l.sh    (TRAMP_SLOT_0+0x2)(r3),r5
1286
1287                                         // l.ori   r9,0xbbbb
1288         l.ori   r5,r0,0xa929            // 0xa929 == l.ori r9
1289         l.sh    (TRAMP_SLOT_1+0x0)(r3),r5
1290         l.andi  r5,r6,0xffff
1291         l.sh    (TRAMP_SLOT_1+0x2)(r3),r5
1292
1293         l.lhz   r5,(TRAMP_SLOT_2+0x0)(r3)       // load hi part of jump instruction
1294         l.andi  r5,r5,0x3ff             // clear out opcode part
1295         l.ori   r5,r5,0x4400            // opcode changed from l.jalr -> l.jr
1296         l.sh    (TRAMP_SLOT_2+0x0)(r3),r5 // write it back
1297
1298         /* fallthrough */
1299
1300 1:      // l.jr
1301         l.j     trampoline_out
1302         l.nop
1303
1304 /* ----------------------------- */
1305
1306 4:      // l.bnf
1307 5:      // l.bf
1308         l.slli  r6,r4,6                 // original offset shifted left 6 - 2
1309 //      l.srli  r6,r6,6                 // original offset shifted right 2
1310
1311         l.slli  r4,r2,4                 // old jump position: EEA shifted left 4
1312 //      l.srli  r4,r4,6                 // old jump position: shifted right 2
1313
1314         l.addi  r5,r3,0xc               // new jump position (physical)
1315         l.slli  r5,r5,4                 // new jump position: shifted left 4
1316
1317         // calculate new jump offset
1318         // new_off = old_off + (old_jump - new_jump)
1319
1320         l.add   r6,r6,r4                // (orig_off + old_jump)
1321         l.sub   r6,r6,r5                // (orig_off + old_jump) - new_jump
1322         l.srli  r6,r6,6                 // new offset shifted right 2
1323
1324         // r6 is new jump offset
1325         l.lwz   r4,(TRAMP_SLOT_2+0x0)(r3)       // load jump instruction
1326         l.srli  r4,r4,16
1327         l.andi  r4,r4,0xfc00            // get opcode part
1328         l.slli  r4,r4,16
1329         l.or    r6,r4,r6                // l.b(n)f new offset
1330         l.sw    TRAMP_SLOT_2(r3),r6     // write it back
1331
1332         /* we need to add l.j to EEA + 0x8 */
1333         tophys  (r4,r2)                 // may not be needed (due to shifts down)
1334         l.addi  r4,r4,(0x8 - 0x8)       // jump target = r2 + 0x8 (compensate for 0x8)
1335                                         // jump position = r5 + 0x8 (0x8 compensated)
1336         l.sub   r4,r4,r5                // jump offset = target - new_position + 0x8
1337
1338         l.slli  r4,r4,4                 // the amount of info in immediate of jump
1339         l.srli  r4,r4,6                 // jump instruction with offset
1340         l.sw    TRAMP_SLOT_4(r3),r4     // write it to 4th slot
1341
1342         /* fallthrough */
1343
1344 trampoline_out:
1345         // set up new EPC to point to our trampoline code
1346         LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
1347         l.mtspr r0,r5,SPR_EPCR_BASE
1348
1349         // immu_trampoline is (4x) CACHE_LINE aligned
1350         // and only 6 instructions long,
1351         // so we need to invalidate only 2 lines
1352
1353         /* Establish cache block size
1354            If BS=0, 16;
1355            If BS=1, 32;
1356            r14 contains the block size
1357         */
1358         l.mfspr r21,r0,SPR_ICCFGR
1359         l.andi  r21,r21,SPR_ICCFGR_CBS
1360         l.srli  r21,r21,7
1361         l.ori   r23,r0,16
1362         l.sll   r14,r23,r21
1363
1364         l.mtspr r0,r5,SPR_ICBIR
1365         l.add   r5,r5,r14
1366         l.mtspr r0,r5,SPR_ICBIR
1367
1368         l.jr    r9
1369         l.nop
1370
1371
1372 /*
1373  * DSCR: prints a string referenced by r3.
1374  *
1375  * PRMS: r3             - address of the first character of null
1376  *                      terminated string to be printed
1377  *
1378  * PREQ: UART at UART_BASE_ADD has to be initialized
1379  *
1380  * POST: caller should be aware that r3, r9 are changed
1381  */
1382 ENTRY(_emergency_print)
1383         EMERGENCY_PRINT_STORE_GPR4
1384         EMERGENCY_PRINT_STORE_GPR5
1385         EMERGENCY_PRINT_STORE_GPR6
1386         EMERGENCY_PRINT_STORE_GPR7
1387 2:
1388         l.lbz   r7,0(r3)
1389         l.sfeq  r7,r0
1390         l.bf    9f
1391         l.nop
1392
1393 // putc:
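// The two busy-wait loops below poll offset 5 (the 16550-style line
// status register): 0x20 waits for THRE (tx holding register empty)
// before the character is written to offset 0, and 0x60 additionally
// waits for TEMT so the character has fully left the transmitter.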
1394         l.movhi r4,hi(UART_BASE_ADD)
1395
1396         l.addi  r6,r0,0x20
1397 1:      l.lbz   r5,5(r4)
1398         l.andi  r5,r5,0x20
1399         l.sfeq  r5,r6
1400         l.bnf   1b
1401         l.nop
1402
1403         l.sb    0(r4),r7
1404
1405         l.addi  r6,r0,0x60
1406 1:      l.lbz   r5,5(r4)
1407         l.andi  r5,r5,0x60
1408         l.sfeq  r5,r6
1409         l.bnf   1b
1410         l.nop
1411
1412         /* next character */
1413         l.j     2b
1414         l.addi  r3,r3,0x1
1415
1416 9:
1417         EMERGENCY_PRINT_LOAD_GPR7
1418         EMERGENCY_PRINT_LOAD_GPR6
1419         EMERGENCY_PRINT_LOAD_GPR5
1420         EMERGENCY_PRINT_LOAD_GPR4
1421         l.jr    r9
1422         l.nop
1423
1424 ENTRY(_emergency_print_nr)
1425         EMERGENCY_PRINT_STORE_GPR4
1426         EMERGENCY_PRINT_STORE_GPR5
1427         EMERGENCY_PRINT_STORE_GPR6
1428         EMERGENCY_PRINT_STORE_GPR7
1429         EMERGENCY_PRINT_STORE_GPR8
1430
1431         l.addi  r8,r0,32                // shift register
1432
1433 1:      /* remove leading zeros */
1434         l.addi  r8,r8,-0x4
1435         l.srl   r7,r3,r8
1436         l.andi  r7,r7,0xf
1437
1438         /* don't skip the last zero if number == 0x0 */
1439         l.sfeqi r8,0x4
1440         l.bf    2f
1441         l.nop
1442
1443         l.sfeq  r7,r0
1444         l.bf    1b
1445         l.nop
1446
1447 2:
1448         l.srl   r7,r3,r8
1449
1450         l.andi  r7,r7,0xf
1451         l.sflts r8,r0
1452         l.bf    9f
1453
1454         l.sfgtui r7,0x9
1455         l.bnf   8f
1456         l.nop
1457         l.addi  r7,r7,0x27
1458
1459 8:
1460         l.addi  r7,r7,0x30
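        // r7 is now ASCII: digits get +0x30 ('0'); for a-f the extra
        // +0x27 above gives +0x57 in total, e.g. 0xa -> 0x61 ('a')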
1461 // putc:
1462         l.movhi r4,hi(UART_BASE_ADD)
1463
1464         l.addi  r6,r0,0x20
1465 1:      l.lbz   r5,5(r4)
1466         l.andi  r5,r5,0x20
1467         l.sfeq  r5,r6
1468         l.bnf   1b
1469         l.nop
1470
1471         l.sb    0(r4),r7
1472
1473         l.addi  r6,r0,0x60
1474 1:      l.lbz   r5,5(r4)
1475         l.andi  r5,r5,0x60
1476         l.sfeq  r5,r6
1477         l.bnf   1b
1478         l.nop
1479
1480         /* next character */
1481         l.j     2b
1482         l.addi  r8,r8,-0x4
1483
1484 9:
1485         EMERGENCY_PRINT_LOAD_GPR8
1486         EMERGENCY_PRINT_LOAD_GPR7
1487         EMERGENCY_PRINT_LOAD_GPR6
1488         EMERGENCY_PRINT_LOAD_GPR5
1489         EMERGENCY_PRINT_LOAD_GPR4
1490         l.jr    r9
1491         l.nop
1492
1493
1494 /*
1495  * This should be used for debugging only.
1496  * It messes up the Linux early serial output
1497  * somehow, so use it sparingly and essentially
1498  * only if you need to debug something that goes wrong
1499  * before Linux gets the early serial going.
1500  *
1501  * Furthermore, you'll have to make sure you set the
1502  * UART_DIVISOR correctly according to the system
1503  * clock rate.
1504  *
1505  *
1506  */
1507
1508
1509
1510 #define SYS_CLK            20000000
1511 //#define SYS_CLK            1843200
1512 #define OR32_CONSOLE_BAUD  115200
1513 #define UART_DIVISOR       SYS_CLK/(16*OR32_CONSOLE_BAUD)
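/* e.g. 20000000 / (16 * 115200) == 10 by integer division with the
 * values above -- see the note above about matching SYS_CLK to the
 * real clock rate */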
1514
1515 ENTRY(_early_uart_init)
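        /*
         * Minimal 16550-compatible setup, assuming the standard 8250
         * register layout at UART_BASE_ADD:
         *   0x2 (FCR) <- 0x07   enable and clear the FIFOs
         *   0x1 (IER) <- 0x00   mask all UART interrupts
         *   0x3 (LCR) <- 0x03   8 data bits, no parity, 1 stop bit
         * then LCR bit 7 (DLAB) is set to program DLM/DLL with
         * UART_DIVISOR, and the original LCR value is restored.
         */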
1516         l.movhi r3,hi(UART_BASE_ADD)
1517
1518         l.addi  r4,r0,0x7
1519         l.sb    0x2(r3),r4
1520
1521         l.addi  r4,r0,0x0
1522         l.sb    0x1(r3),r4
1523
1524         l.addi  r4,r0,0x3
1525         l.sb    0x3(r3),r4
1526
1527         l.lbz   r5,3(r3)
1528         l.ori   r4,r5,0x80
1529         l.sb    0x3(r3),r4
1530         l.addi  r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
1531         l.sb    UART_DLM(r3),r4
1532         l.addi  r4,r0,((UART_DIVISOR) & 0x000000ff)
1533         l.sb    UART_DLL(r3),r4
1534         l.sb    0x3(r3),r5
1535
1536         l.jr    r9
1537         l.nop
1538
1539         .section .rodata
1540 _string_unhandled_exception:
1541         .string "\n\rRunarunaround: Unhandled exception 0x\0"
1542
1543 _string_epc_prefix:
1544         .string ": EPC=0x\0"
1545
1546 _string_nl:
1547         .string "\n\r\0"
1548
1549
1550 /* ========================================[ page aligned structures ]=== */
1551
1552 /*
1553  * .data section should be page aligned
1554  *      (look into arch/openrisc/kernel/vmlinux.lds.S)
1555  */
1556         .section .data,"aw"
1557         .align  8192
1558         .global  empty_zero_page
1559 empty_zero_page:
1560         .space  8192
1561
1562         .global  swapper_pg_dir
1563 swapper_pg_dir:
1564         .space  8192
1565
1566         .global _unhandled_stack
1567 _unhandled_stack:
1568         .space  8192
1569 _unhandled_stack_top:
1570
1571 /* ============================================================[ EOF ]=== */