#ifndef _ASM_POWERPC_EXCEPTION_H
#define _ASM_POWERPC_EXCEPTION_H
/*
 * Extracted from head_64.S
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers. They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 *
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */
#include <asm/head-64.h>
#include <asm/feature-fixups.h>

/* PACA save area offsets (exgen, exmc, etc) */
#if defined(CONFIG_RELOCATABLE)
#define EX_SIZE		11	/* size in u64 units */
#else
#define EX_SIZE		10	/* size in u64 units */
#endif
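
/*
 * Each exception class gets its own EX_SIZE-unit save area in the
 * paca (PACA_EXGEN, PACA_EXSLB, PACA_EXMC, ...). Since r13 always
 * holds the paca pointer in the kernel, a save slot is reached with a
 * single displacement, e.g. (illustrative, not from this file):
 *
 *	std	r9,PACA_EXGEN+EX_R9(r13)
 */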
/*
 * maximum recursive depth of MCE exceptions
 */
#define MAX_MCE_DEPTH	4

/*
 * EX_R3 is only used by the bad_stack handler. bad_stack reloads and
 * saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap
 * with EX_DAR.
 */
#define EX_R3		EX_DAR
#define STF_ENTRY_BARRIER_SLOT						\
	STF_ENTRY_BARRIER_FIXUP_SECTION;				\
	nop;								\
	nop;								\
	nop

#define STF_EXIT_BARRIER_SLOT						\
	STF_EXIT_BARRIER_FIXUP_SECTION;					\
	nop;								\
	nop;								\
	nop;								\
	nop;								\
	nop;								\
	nop

#define ENTRY_FLUSH_SLOT						\
	ENTRY_FLUSH_FIXUP_SECTION;					\
	nop;								\
	nop;								\
	nop;
/*
 * r10 must be free to use, r13 must be paca
 */
#define INTERRUPT_TO_KERNEL						\
	STF_ENTRY_BARRIER_SLOT;						\
	ENTRY_FLUSH_SLOT
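
/*
 * Note: the *_FIXUP_SECTION macros only record where the nop slots
 * are; nothing is flushed at compile time. At boot the slots are
 * patched with the real barrier/flush sequences (see the fixup code
 * in arch/powerpc/lib/feature-fixups.c) according to what the
 * platform reports is needed.
 */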
/*
 * Macros for annotating the expected destination of (h)rfid
 *
 * The nop instructions allow us to insert one or more instructions to flush the
 * L1-D cache when returning to userspace or a guest.
 */
#define RFI_FLUSH_SLOT							\
	RFI_FLUSH_FIXUP_SECTION;					\
	nop;								\
	nop;								\
	nop
#define RFI_TO_KERNEL							\
	rfid

#define RFI_TO_USER							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	rfid;								\
	b	rfi_flush_fallback

#define RFI_TO_USER_OR_KERNEL						\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	rfid;								\
	b	rfi_flush_fallback

#define RFI_TO_GUEST							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	rfid;								\
	b	rfi_flush_fallback

#define HRFI_TO_KERNEL							\
	hrfid

#define HRFI_TO_USER							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	hrfid;								\
	b	hrfi_flush_fallback

#define HRFI_TO_USER_OR_KERNEL						\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	hrfid;								\
	b	hrfi_flush_fallback

#define HRFI_TO_GUEST							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	hrfid;								\
	b	hrfi_flush_fallback

#define HRFI_TO_UNKNOWN							\
	STF_EXIT_BARRIER_SLOT;						\
	RFI_FLUSH_SLOT;							\
	hrfid;								\
	b	hrfi_flush_fallback
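
/*
 * Illustrative use at interrupt return (cf. the actual exit paths in
 * entry_64.S):
 *
 *	mtspr	SPRN_SRR0,r11
 *	mtspr	SPRN_SRR1,r12
 *	RFI_TO_USER_OR_KERNEL
 *
 * The annotation selects the right slot, so the L1-D flush runs only
 * when returning to a context that needs it.
 */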
#ifdef CONFIG_RELOCATABLE
#define __EXCEPTION_PROLOG_2_RELON(label, h)				\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
	LOAD_HANDLER(r12,label);					\
	mtctr	r12;							\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
	li	r10,MSR_RI;						\
	mtmsrd	r10,1;			/* Set RI (EE=0) */		\
	bctr;
#else
/* If not relocatable, we can jump directly -- and save messing with CTR */
#define __EXCEPTION_PROLOG_2_RELON(label, h)				\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
	li	r10,MSR_RI;						\
	mtmsrd	r10,1;			/* Set RI (EE=0) */		\
	b	label;
#endif

#define EXCEPTION_PROLOG_2_RELON(label, h)				\
	__EXCEPTION_PROLOG_2_RELON(label, h)
/*
 * As EXCEPTION_PROLOG(), except we've already got relocation on so no need to
 * rfid. Save CTR in case we're CONFIG_RELOCATABLE, in which case
 * EXCEPTION_PROLOG_2_RELON will be using CTR.
 */
#define EXCEPTION_RELON_PROLOG(area, label, h, extra, vec)		\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_PROLOG_0(area);					\
	EXCEPTION_PROLOG_1(area, extra, vec);				\
	EXCEPTION_PROLOG_2_RELON(label, h)
/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 * part of label. This requires that the label be within 64KB of kernelbase, and
 * that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
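
/*
 * Illustrative expansion, assuming label sits 0x4600 bytes above the
 * (64K-aligned) kernel base:
 *
 *	ld	r12,PACAKBASE(r13)
 *	ori	r12,r12,0x4600
 *
 * ori can only supply the low 16 bits, hence the 64KB restriction.
 */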
#define __LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l;
/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l;				\
	addis	reg,reg,(ABS_ADDR(label))@h;
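
/*
 * Here both halves of a 32-bit offset are applied: ori ORs in the low
 * 16 bits and addis adds the high 16 bits. @h rather than @ha is
 * correct because ori does not sign-extend, so no carry adjustment of
 * the high half is needed. For an offset of 0x01234600 this becomes:
 *
 *	ori	r9,r9,0x4600
 *	addis	r9,r9,0x0123
 */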
/* Exception register prefixes */
#define EXC_HV		H
#define EXC_STD
#if defined(CONFIG_RELOCATABLE)
/*
 * If we support interrupts with relocation on AND we're a relocatable kernel,
 * we need to use CTR to get to the 2nd level handler. So, save/restore it
 * when required.
 */
#define SAVE_CTR(reg, area)	mfctr	reg;	std	reg,area+EX_CTR(r13)
#define GET_CTR(reg, area)	ld	reg,area+EX_CTR(r13)
#define RESTORE_CTR(reg, area)	ld	reg,area+EX_CTR(r13); mtctr reg
#else
/* ...else CTR is unused and in register. */
#define SAVE_CTR(reg, area)
#define GET_CTR(reg, area)	mfctr	reg
#define RESTORE_CTR(reg, area)
#endif
/*
 * PPR save/restore macros used in exceptions_64s.S
 * Used for P7 or later processors
 */
#define SAVE_PPR(area, ra)						\
BEGIN_FTR_SECTION_NESTED(940)						\
	ld	ra,area+EX_PPR(r13);	/* Read PPR from paca */	\
	std	ra,RESULT(r1);		/* Store PPR in RESULT for now */ \
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
/*
 * This is called after we are finished accessing 'area', so we can now take
 * SLB faults accessing the thread struct, which will use PACA_EXSLB area.
 * This is required because the large_addr_slb handler uses EXSLB and it also
 * uses the common exception macros including this PPR saving.
 */
#define MOVE_PPR_TO_THREAD(ra, rb)					\
BEGIN_FTR_SECTION_NESTED(940)						\
	ld	ra,PACACURRENT(r13);					\
	ld	rb,RESULT(r1);		/* Read PPR from stack */	\
	std	rb,TASKTHREADPPR(ra);					\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
#define RESTORE_PPR_PACA(area, ra)					\
BEGIN_FTR_SECTION_NESTED(941)						\
	ld	ra,area+EX_PPR(r13);					\
	mtspr	SPRN_PPR,ra;						\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
/*
 * Get an SPR into a register if the CPU has the given feature
 */
#define OPT_GET_SPR(ra, spr, ftr)					\
BEGIN_FTR_SECTION_NESTED(943)						\
	mfspr	ra,spr;							\
END_FTR_SECTION_NESTED(ftr,ftr,943)

/*
 * Set an SPR from a register if the CPU has the given feature
 */
#define OPT_SET_SPR(ra, spr, ftr)					\
BEGIN_FTR_SECTION_NESTED(943)						\
	mtspr	spr,ra;							\
END_FTR_SECTION_NESTED(ftr,ftr,943)
/*
 * Save a register to the PACA if the CPU has the given feature
 */
#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr)				\
BEGIN_FTR_SECTION_NESTED(943)						\
	std	ra,offset(r13);						\
END_FTR_SECTION_NESTED(ftr,ftr,943)
#define EXCEPTION_PROLOG_0(area)					\
	GET_PACA(r13);							\
	std	r9,area+EX_R9(r13);	/* save r9 */			\
	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);			\
	HMT_MEDIUM;							\
	std	r10,area+EX_R10(r13);	/* save r10 - r12 */		\
	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
#define __EXCEPTION_PROLOG_1_PRE(area)					\
	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);		\
	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
	INTERRUPT_TO_KERNEL;						\
	SAVE_CTR(r10, area);						\
	mfcr	r9

#define __EXCEPTION_PROLOG_1_POST(area)					\
	std	r11,area+EX_R11(r13);					\
	std	r12,area+EX_R12(r13);					\
	GET_SCRATCH0(r10);						\
	std	r10,area+EX_R13(r13)
/*
 * This version of the EXCEPTION_PROLOG_1 carries an additional
 * parameter called "bitmask" to support checking of the interrupt
 * maskable level in the SOFTEN_TEST.
 * Intended to be used in MASKABLE_EXCEPTION_* macros.
 */
#define MASKABLE_EXCEPTION_PROLOG_1(area, extra, vec, bitmask)		\
	__EXCEPTION_PROLOG_1_PRE(area);					\
	extra(vec, bitmask);						\
	__EXCEPTION_PROLOG_1_POST(area);
/*
 * This version of the EXCEPTION_PROLOG_1 is intended
 * to be used in STD_EXCEPTION* macros
 */
#define _EXCEPTION_PROLOG_1(area, extra, vec)				\
	__EXCEPTION_PROLOG_1_PRE(area);					\
	extra(vec);							\
	__EXCEPTION_PROLOG_1_POST(area);

#define EXCEPTION_PROLOG_1(area, extra, vec)				\
	_EXCEPTION_PROLOG_1(area, extra, vec)
#define __EXCEPTION_PROLOG_2(label, h)					\
	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
	LOAD_HANDLER(r12,label)						\
	mtspr	SPRN_##h##SRR0,r12;					\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
	mtspr	SPRN_##h##SRR1,r10;					\
	h##RFI_TO_KERNEL;						\
	b	.	/* prevent speculative execution */

#define EXCEPTION_PROLOG_2(label, h)					\
	__EXCEPTION_PROLOG_2(label, h)

/* _NORI variant keeps MSR_RI clear */
#define __EXCEPTION_PROLOG_2_NORI(label, h)				\
	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
	xori	r10,r10,MSR_RI;		/* Clear MSR_RI */		\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
	LOAD_HANDLER(r12,label)						\
	mtspr	SPRN_##h##SRR0,r12;					\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
	mtspr	SPRN_##h##SRR1,r10;					\
	h##RFI_TO_KERNEL;						\
	b	.	/* prevent speculative execution */

#define EXCEPTION_PROLOG_2_NORI(label, h)				\
	__EXCEPTION_PROLOG_2_NORI(label, h)
#define EXCEPTION_PROLOG(area, label, h, extra, vec)			\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_PROLOG_0(area);					\
	EXCEPTION_PROLOG_1(area, extra, vec);				\
	EXCEPTION_PROLOG_2(label, h);
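
/*
 * Flow of a full EXCEPTION_PROLOG expansion, in brief:
 *
 *	SET_SCRATCH0		r13 -> SPRG scratch
 *	EXCEPTION_PROLOG_0	r13 = paca; r9-r10 (+PPR/CFAR) -> save area
 *	EXCEPTION_PROLOG_1	CR/CTR/r11-r13 -> save area; run the
 *				"extra" test (KVM and/or soft-mask)
 *	EXCEPTION_PROLOG_2	SRR0/SRR1 -> r11/r12; rfid to label with
 *				the kernel MSR
 */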
#define __KVMTEST(h, n)							\
	lbz	r10,HSTATE_IN_GUEST(r13);				\
	cmpwi	r10,0;							\
	bne	do_kvm_##h##n
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * If hv is possible, interrupts come in to the hv version
 * of the kvmppc_interrupt code, which then jumps to the PR handler,
 * kvmppc_interrupt_pr, if the guest is a PR guest.
 */
#define kvmppc_interrupt kvmppc_interrupt_hv
#else
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
/*
 * Branch to label using its 0xC000 address. This results in instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 * load KBASE for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)					\
	__LOAD_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr
#ifdef CONFIG_RELOCATABLE
#define BRANCH_TO_COMMON(reg, label)					\
	__LOAD_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr

#define BRANCH_LINK_TO_FAR(label)					\
	__LOAD_FAR_HANDLER(r12, label);					\
	mtctr	r12;							\
	bctrl

/*
 * KVM requires __LOAD_FAR_HANDLER.
 *
 * __BRANCH_TO_KVM_EXIT branches are also a special case because they
 * explicitly use r9 then reload it from PACA before branching. Hence
 * the double-underscore.
 */
#define __BRANCH_TO_KVM_EXIT(area, label)				\
	mfctr	r9;							\
	std	r9,HSTATE_SCRATCH1(r13);				\
	__LOAD_FAR_HANDLER(r9, label);					\
	mtctr	r9;							\
	ld	r9,area+EX_R9(r13);					\
	bctr

#else
#define BRANCH_TO_COMMON(reg, label)					\
	b	label

#define BRANCH_LINK_TO_FAR(label)					\
	bl	label

#define __BRANCH_TO_KVM_EXIT(area, label)				\
	ld	r9,area+EX_R9(r13);					\
	b	label
#endif
/* Do not enable RI */
#define EXCEPTION_PROLOG_NORI(area, label, h, extra, vec)		\
	EXCEPTION_PROLOG_0(area);					\
	EXCEPTION_PROLOG_1(area, extra, vec);				\
	EXCEPTION_PROLOG_2_NORI(label, h);
#define __KVM_HANDLER(area, h, n)					\
	BEGIN_FTR_SECTION_NESTED(947)					\
	ld	r10,area+EX_CFAR(r13);					\
	std	r10,HSTATE_CFAR(r13);					\
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
	BEGIN_FTR_SECTION_NESTED(948)					\
	ld	r10,area+EX_PPR(r13);					\
	std	r10,HSTATE_PPR(r13);					\
	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
	ld	r10,area+EX_R10(r13);					\
	std	r12,HSTATE_SCRATCH0(r13);				\
	sldi	r12,r9,32;						\
	ori	r12,r12,(n);						\
	/* This reloads r9 before branching to kvmppc_interrupt */	\
	__BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt)
#define __KVM_HANDLER_SKIP(area, h, n)					\
	cmpwi	r10,KVM_GUEST_MODE_SKIP;				\
	beq	89f;							\
	BEGIN_FTR_SECTION_NESTED(948)					\
	ld	r10,area+EX_PPR(r13);					\
	std	r10,HSTATE_PPR(r13);					\
	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
	ld	r10,area+EX_R10(r13);					\
	std	r12,HSTATE_SCRATCH0(r13);				\
	sldi	r12,r9,32;						\
	ori	r12,r12,(n);						\
	/* This reloads r9 before branching to kvmppc_interrupt */	\
	__BRANCH_TO_KVM_EXIT(area, kvmppc_interrupt);			\
89:	mtocrf	0x80,r9;						\
	ld	r9,area+EX_R9(r13);					\
	ld	r10,area+EX_R10(r13);					\
	b	kvmppc_skip_##h##interrupt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#define KVMTEST(h, n)			__KVMTEST(h, n)
#define KVM_HANDLER(area, h, n)		__KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
#else
#define KVMTEST(h, n)
#define KVM_HANDLER(area, h, n)
#define KVM_HANDLER_SKIP(area, h, n)
#endif
#define EXCEPTION_PROLOG_COMMON_1()					\
	std	r9,_CCR(r1);		/* save CR in stackframe */	\
	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
	std	r12,_MSR(r1);		/* save SRR1 in stackframe */	\
	std	r10,0(r1);		/* make stack chain pointer */	\
	std	r0,GPR0(r1);		/* save r0 in stackframe */	\
	std	r10,GPR1(r1);		/* save r1 in stackframe */
/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address. We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area)				\
	andi.	r10,r12,MSR_PR;		/* See if coming from user */	\
	mr	r10,r1;			/* Save r1 */			\
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */ \
	beq-	1f;							\
	ld	r1,PACAKSAVE(r13);	/* kernel stack to use */	\
1:	cmpdi	cr1,r1,-INT_FRAME_SIZE;	/* check if r1 is in userspace */ \
	blt+	cr1,3f;			/* abort if it is */		\
	li	r1,(n);			/* will be reloaded later */	\
	sth	r1,PACA_TRAP_SAVE(r13);					\
	std	r3,area+EX_R3(r13);					\
	addi	r3,r13,area;		/* r3 -> where regs are saved*/	\
	RESTORE_CTR(r1, area);						\
	b	bad_stack;						\
3:	EXCEPTION_PROLOG_COMMON_1();					\
	beq	4f;			/* if from kernel mode */	\
	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10);				\
	SAVE_PPR(area, r9);						\
4:	EXCEPTION_PROLOG_COMMON_2(area)					\
	beq	5f;			/* if from kernel mode */	\
	MOVE_PPR_TO_THREAD(r9, r10);					\
5:	EXCEPTION_PROLOG_COMMON_3(n)					\
	ACCOUNT_STOLEN_TIME
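
/*
 * Note on the stack check above: kernel addresses are "negative" in a
 * signed 64-bit comparison, so after the frame has been allocated,
 *
 *	cmpdi	cr1,r1,-INT_FRAME_SIZE
 *	blt+	cr1,3f
 *
 * continues at 3: only while r1 is still a plausible kernel stack
 * pointer; anything else (e.g. a userspace value left in r1) falls
 * through to bad_stack.
 */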
/* Save original regs values from save area to stack frame. */
#define EXCEPTION_PROLOG_COMMON_2(area)					\
	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe */ \
	ld	r10,area+EX_R10(r13);					\
	std	r9,GPR9(r1);						\
	std	r10,GPR10(r1);						\
	ld	r9,area+EX_R11(r13);	/* move r11 - r13 to stackframe */ \
	ld	r10,area+EX_R12(r13);					\
	ld	r11,area+EX_R13(r13);					\
	std	r9,GPR11(r1);						\
	std	r10,GPR12(r1);						\
	std	r11,GPR13(r1);						\
	BEGIN_FTR_SECTION_NESTED(66);					\
	ld	r10,area+EX_CFAR(r13);					\
	std	r10,ORIG_GPR3(r1);					\
	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);	\
	GET_CTR(r10, area);						\
	std	r10,_CTR(r1)
#define EXCEPTION_PROLOG_COMMON_3(n)					\
	std	r2,GPR2(r1);		/* save r2 in stackframe */	\
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */ \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	\
	mflr	r9;			/* Get LR, later save to stack */ \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
	std	r9,_LINK(r1);						\
	lbz	r10,PACAIRQSOFTMASK(r13);				\
	mfspr	r11,SPRN_XER;		/* save XER in stackframe */	\
	std	r10,SOFTE(r1);						\
	std	r11,_XER(r1);						\
	li	r9,(n)+1;						\
	std	r9,_TRAP(r1);		/* set trap number */		\
	li	r10,0;							\
	ld	r11,exception_marker@toc(r2);				\
	std	r10,RESULT(r1);		/* clear regs->result */	\
	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
#define STD_EXCEPTION(vec, label)					\
	EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_STD, KVMTEST_PR, vec);

/* Version of above for when we have to branch out-of-line */
#define __OOL_EXCEPTION(vec, label, hdlr)				\
	SET_SCRATCH0(r13);						\
	EXCEPTION_PROLOG_0(PACA_EXGEN)					\
	b	hdlr
#define STD_EXCEPTION_OOL(vec, label)					\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);		\
	EXCEPTION_PROLOG_2(label, EXC_STD)

#define STD_EXCEPTION_HV(loc, vec, label)				\
	EXCEPTION_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);

#define STD_EXCEPTION_HV_OOL(vec, label)				\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec);		\
	EXCEPTION_PROLOG_2(label, EXC_HV)

#define STD_RELON_EXCEPTION(loc, vec, label)				\
	/* No guest interrupts come through here */			\
	EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_STD, NOTEST, vec);

#define STD_RELON_EXCEPTION_OOL(vec, label)				\
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);			\
	EXCEPTION_PROLOG_2_RELON(label, EXC_STD)

#define STD_RELON_EXCEPTION_HV(loc, vec, label)				\
	EXCEPTION_RELON_PROLOG(PACA_EXGEN, label, EXC_HV, KVMTEST_HV, vec);

#define STD_RELON_EXCEPTION_HV_OOL(vec, label)				\
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, vec);		\
	EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
/* These associate vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500	PACA_IRQ_EE
#define SOFTEN_VALUE_0x900	PACA_IRQ_DEC
#define SOFTEN_VALUE_0x980	PACA_IRQ_DEC
#define SOFTEN_VALUE_0xa00	PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe80	PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe60	PACA_IRQ_HMI
#define SOFTEN_VALUE_0xea0	PACA_IRQ_EE
#define SOFTEN_VALUE_0xf00	PACA_IRQ_PMI
#define __SOFTEN_TEST(h, vec, bitmask)					\
	lbz	r10,PACAIRQSOFTMASK(r13);				\
	andi.	r10,r10,bitmask;					\
	li	r10,SOFTEN_VALUE_##vec;					\
	bne	masked_##h##interrupt
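
/*
 * Illustrative expansion for the decrementer (vec = 0x900), with
 * bitmask = IRQS_DISABLED:
 *
 *	lbz	r10,PACAIRQSOFTMASK(r13)
 *	andi.	r10,r10,IRQS_DISABLED
 *	li	r10,PACA_IRQ_DEC
 *	bne	masked_interrupt
 *
 * If the vector is soft-masked, we branch out with r10 telling
 * masked_interrupt which bit to record in paca->irq_happened.
 */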
#define _SOFTEN_TEST(h, vec, bitmask)	__SOFTEN_TEST(h, vec, bitmask)

#define SOFTEN_TEST_PR(vec, bitmask)					\
	KVMTEST(EXC_STD, vec);						\
	_SOFTEN_TEST(EXC_STD, vec, bitmask)

#define SOFTEN_TEST_HV(vec, bitmask)					\
	KVMTEST(EXC_HV, vec);						\
	_SOFTEN_TEST(EXC_HV, vec, bitmask)

#define KVMTEST_PR(vec)							\
	KVMTEST(EXC_STD, vec)

#define KVMTEST_HV(vec)							\
	KVMTEST(EXC_HV, vec)

#define SOFTEN_NOTEST_PR(vec, bitmask)	_SOFTEN_TEST(EXC_STD, vec, bitmask)
#define SOFTEN_NOTEST_HV(vec, bitmask)	_SOFTEN_TEST(EXC_HV, vec, bitmask)
#define __MASKABLE_EXCEPTION(vec, label, h, extra, bitmask)		\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_PROLOG_0(PACA_EXGEN);					\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask);	\
	EXCEPTION_PROLOG_2(label, h);

#define MASKABLE_EXCEPTION(vec, label, bitmask)				\
	__MASKABLE_EXCEPTION(vec, label, EXC_STD, SOFTEN_TEST_PR, bitmask)

#define MASKABLE_EXCEPTION_OOL(vec, label, bitmask)			\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
	EXCEPTION_PROLOG_2(label, EXC_STD)

#define MASKABLE_EXCEPTION_HV(vec, label, bitmask)			\
	__MASKABLE_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)

#define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask)			\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
	EXCEPTION_PROLOG_2(label, EXC_HV)

#define __MASKABLE_RELON_EXCEPTION(vec, label, h, extra, bitmask)	\
	SET_SCRATCH0(r13);		/* save r13 */			\
	EXCEPTION_PROLOG_0(PACA_EXGEN);					\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask);	\
	EXCEPTION_PROLOG_2_RELON(label, h)

#define MASKABLE_RELON_EXCEPTION(vec, label, bitmask)			\
	__MASKABLE_RELON_EXCEPTION(vec, label, EXC_STD, SOFTEN_NOTEST_PR, bitmask)

#define MASKABLE_RELON_EXCEPTION_OOL(vec, label, bitmask)		\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
	EXCEPTION_PROLOG_2(label, EXC_STD);

#define MASKABLE_RELON_EXCEPTION_HV(vec, label, bitmask)		\
	__MASKABLE_RELON_EXCEPTION(vec, label, EXC_HV, SOFTEN_TEST_HV, bitmask)

#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask)		\
	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
	EXCEPTION_PROLOG_2_RELON(label, EXC_HV)
/*
 * Our exception common code can be passed various "additions"
 * to specify the behaviour of interrupts, whether to kick the
 * runlatch, etc...
 */

/*
 * This addition reconciles our actual IRQ state with the various software
 * flags that track it. This may call C code.
 */
#define ADD_RECONCILE	RECONCILE_IRQ_STATE(r10,r11)

#define ADD_NVGPRS				\
	bl	save_nvgprs

#define RUNLATCH_ON				\
BEGIN_FTR_SECTION				\
	CURRENT_THREAD_INFO(r3, r1);		\
	ld	r4,TI_LOCAL_FLAGS(r3);		\
	andi.	r0,r4,_TLF_RUNLATCH;		\
	beql	ppc64_runlatch_on_trampoline;	\
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
#define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions)	\
	EXCEPTION_PROLOG_COMMON(trap, area);				\
	/* Volatile regs are potentially clobbered here */		\
	additions;							\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	ret

/*
 * Exception where stack is already set in r1, r1 is saved in r10, and it
 * continues rather than returns.
 */
#define EXCEPTION_COMMON_NORET_STACK(area, trap, label, hdlr, additions) \
	EXCEPTION_PROLOG_COMMON_1();					\
	EXCEPTION_PROLOG_COMMON_2(area);				\
	EXCEPTION_PROLOG_COMMON_3(trap);				\
	/* Volatile regs are potentially clobbered here */		\
	additions;							\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr
#define STD_EXCEPTION_COMMON(trap, label, hdlr)				\
	EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr,			\
			 ret_from_except, ADD_NVGPRS;ADD_RECONCILE)
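
/*
 * Illustrative use (the real wiring lives in exceptions-64s.S; the
 * vector and names below are examples only):
 *
 *	STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
 *
 * builds the pt_regs frame, saves the non-volatile GPRs, reconciles
 * the IRQ state, then does bl unknown_exception; b ret_from_except.
 */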
/*
 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
 * in the idle task and therefore need the special idle handling
 * (finish nap and runlatch)
 */
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)			\
	EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr,			\
			 ret_from_except_lite, FINISH_NAP;ADD_RECONCILE;RUNLATCH_ON)
/*
 * When the idle code in power4_idle puts the CPU into NAP mode,
 * it has to do so in a loop, and relies on the external interrupt
 * and decrementer interrupt entry code to get it out of the loop.
 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 * to signal that it is in the loop and needs help to get out.
 */
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP				\
BEGIN_FTR_SECTION				\
	CURRENT_THREAD_INFO(r11, r1);		\
	ld	r9,TI_LOCAL_FLAGS(r11);		\
	andi.	r10,r9,_TLF_NAPPING;		\
	bnel	power4_fixup_nap;		\
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#else
#define FINISH_NAP
#endif

#endif	/* _ASM_POWERPC_EXCEPTION_H */