/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
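/*
 * VCPU_GPRS_TM(reg) gives the offset of checkpointed (transactional-
 * memory) guest GPR 'reg' within the vcpu struct; it is used by the
 * TM save/restore code further down in this file.
 */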
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE    1
#define NAPPING_NOVCPU  2
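/*
 * NAPPING_CEDE means the thread napped because its vcpu executed
 * H_CEDE; NAPPING_NOVCPU means it napped because it had no vcpu to
 * run.  The wakeup code in kvm_start_guest below uses this value to
 * pick the matching wakeup path.
 */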
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
        std     r0, PPC_LR_STKOFF(r1)
        LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
        mtmsrd  r0,1            /* clear RI in MSR */
        ld      r4, HSTATE_KVM_VCPU(r13)

        /* Back from guest - restore host state and return to caller */

        /* Restore host DABR and DABRX */
        ld      r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

        ld      r3,PACA_SPRG_VDSO(r13)
        mtspr   SPRN_SPRG_VDSO_WRITE,r3

        /* Reload the host's PMU registers */
        ld      r3, PACALPPACAPTR(r13)  /* is the host using the PMU? */
        lbz     r4, LPPACA_PMCINUSE(r3)
        beq     23f                     /* skip if not */
        ld      r3, HSTATE_MMCR0(r13)
        andi.   r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
        lwz     r3, HSTATE_PMC1(r13)
        lwz     r4, HSTATE_PMC2(r13)
        lwz     r5, HSTATE_PMC3(r13)
        lwz     r6, HSTATE_PMC4(r13)
        lwz     r8, HSTATE_PMC5(r13)
        lwz     r9, HSTATE_PMC6(r13)
        ld      r3, HSTATE_MMCR0(r13)
        ld      r4, HSTATE_MMCR1(r13)
        ld      r5, HSTATE_MMCRA(r13)
        ld      r6, HSTATE_SIAR(r13)
        ld      r7, HSTATE_SDAR(r13)
        ld      r8, HSTATE_MMCR2(r13)
        ld      r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

        /*
         * Reload DEC.  HDEC interrupts were disabled when
         * we reloaded the host's LPCR value.
         */
        ld      r3, HSTATE_DECEXP(r13)

        /* hwthread_req may have got set by cede or no vcpu, so clear it */
        stb     r0, HSTATE_HWTHREAD_REQ(r13)

        /*
         * For external and machine check interrupts, we need
         * to call the Linux handler to process the interrupt.
         * We do that by jumping to absolute address 0x500 for
         * external interrupts, or the machine_check_fwnmi label
         * for machine checks (since firmware might have patched
         * the vector area at 0x200).  The [h]rfid at the end of the
         * handler will return to the book3s_hv_interrupts.S code.
         * For other interrupts we do the rfid to get back
         * to the book3s_hv_interrupts.S code here.
         */
        ld      r8, 112+PPC_LR_STKOFF(r1)
        ld      r7, HSTATE_HOST_MSR(r13)
        cmpwi   cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        cmpwi   r12, BOOK3S_INTERRUPT_H_DOORBELL
        beq     15f     /* Invoke the H_DOORBELL handler */
        cmpwi   cr2, r12, BOOK3S_INTERRUPT_HMI
        beq     cr2, 14f        /* HMI check */

        /* RFI into the highmem handler, or branch to interrupt handler */
        mtmsrd  r6, 1           /* Clear RI in MSR */
        beq     cr1, 13f        /* machine check */

        /* On POWER7, we have external interrupts set to use HSRR0/1 */
11:     mtspr   SPRN_HSRR0, r8
13:     b       machine_check_fwnmi
14:     mtspr   SPRN_HSRR0, r8
        b       hmi_exception_after_realmode
15:     mtspr   SPRN_HSRR0, r8

kvmppc_primary_no_guest:
        /* We handle this much like a ceded vcpu */
        /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
        /*
         * Make sure the primary has finished the MMU switch.
         * We should never get here on a secondary thread, but
         * check it for robustness' sake.
         */
        ld      r5, HSTATE_KVM_VCORE(r13)
65:     lbz     r0, VCORE_IN_GUEST(r5)

        /* set our bit in napping_threads */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r7, HSTATE_PTID(r13)
        addi    r6, r5, VCORE_NAPPING_THREADS
        /* order napping_threads update vs testing entry_exit_map */
        lwz     r7, VCORE_ENTRY_EXIT(r5)
        bge     kvm_novcpu_exit /* another thread already exiting */
        li      r3, NAPPING_NOVCPU
        stb     r3, HSTATE_NAPPING(r13)
        li      r3, 0           /* Don't wake on privileged (OS) doorbell */
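        /*
         * A rough C sketch of the sequence above, assuming the
         * struct kvmppc_vcore field names ('ptid' is HSTATE_PTID):
         *
         *      atomically: vc->napping_threads |= 1 << ptid;
         *      smp_mb();                        // the ordering noted above
         *      if (vc->entry_exit_map >= 0x100) // a thread is exiting
         *              goto kvm_novcpu_exit;
         *      hstate->napping = NAPPING_NOVCPU;
         */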
/*
 * Entered from kvm_start_guest if kvm_hstate.napping is set
 * to NAPPING_NOVCPU
 */
        ld      r1, HSTATE_HOST_R1(r13)
        ld      r5, HSTATE_KVM_VCORE(r13)
        stb     r0, HSTATE_NAPPING(r13)

        /* check the wake reason */
        bl      kvmppc_check_wake_reason

        /*
         * Restore volatile registers since we could have called
         * a C routine in kvmppc_check_wake_reason.
         */
        ld      r5, HSTATE_KVM_VCORE(r13)

        /* see if any other thread is already exiting */
        lwz     r0, VCORE_ENTRY_EXIT(r5)

        /* clear our bit in napping_threads */
        lbz     r7, HSTATE_PTID(r13)
        addi    r6, r5, VCORE_NAPPING_THREADS

        /* See if the wake reason means we need to exit */
        /* See if our timeslice has expired (HDEC is negative) */
        li      r12, BOOK3S_INTERRUPT_HV_DECREMENTER

        /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
        ld      r4, HSTATE_KVM_VCPU(r13)
        beq     kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMENTRY
        bl      kvmhv_start_timing

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        ld      r4, HSTATE_KVM_VCPU(r13)
        addi    r3, r4, VCPU_TB_RMEXIT
        bl      kvmhv_accumulate_time

        bl      kvmhv_commence_exit
        b       kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
        .globl  kvm_start_guest
kvm_start_guest:
        /* Set runlatch bit the minute you wake up from nap */
        li      r0,KVM_HWTHREAD_IN_KVM
        stb     r0,HSTATE_HWTHREAD_STATE(r13)

        /* NV GPR values from power7_idle() will no longer be valid */
        stb     r0,PACA_NAPSTATELOST(r13)

        /* were we napping due to cede? */
        lbz     r0,HSTATE_NAPPING(r13)
        cmpwi   r0,NAPPING_CEDE
        cmpwi   r0,NAPPING_NOVCPU
        beq     kvm_novcpu_wakeup
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD

        /*
         * We weren't napping due to cede, so this must be a secondary
         * thread being woken up to run a guest, or being woken up due
         * to a stray IPI.  (Or due to some machine check or hypervisor
         * maintenance interrupt while the core is in KVM.)
         */

        /* Check the wake reason in SRR1 to see why we got here */
        bl      kvmppc_check_wake_reason
        /*
         * kvmppc_check_wake_reason could invoke a C routine, but we
         * have no volatile registers to restore when we return.
         */

        /* get vcore pointer, NULL if we have nothing to run */
        ld      r5,HSTATE_KVM_VCORE(r13)
        /* if we have no vcore to run, go back to sleep */

kvm_secondary_got_guest:

        /* Set HSTATE_DSCR(r13) to something sensible */
        ld      r6, PACA_DSCR_DEFAULT(r13)
        std     r6, HSTATE_DSCR(r13)

        /* On thread 0 of a subcore, set HDEC to max */
        lbz     r4, HSTATE_PTID(r13)

        /* and set per-LPAR registers, if doing dynamic micro-threading */
        ld      r6, HSTATE_SPLIT_MODE(r13)
        ld      r0, KVM_SPLIT_RPR(r6)
        ld      r0, KVM_SPLIT_PMMAR(r6)
        ld      r0, KVM_SPLIT_LDBAR(r6)

        /* Order load of vcpu after load of vcore */
        ld      r4, HSTATE_KVM_VCPU(r13)

        /* Back from the guest, go back to nap */
        /* Clear our vcpu and vcore pointers so we don't come back in early */
        std     r0, HSTATE_KVM_VCPU(r13)
        /*
         * Once we clear HSTATE_KVM_VCORE(r13), the code in
         * kvmppc_run_core() is going to assume that all our vcpu
         * state is visible in memory.  This lwsync makes sure
         * of that.
         */
        std     r0, HSTATE_KVM_VCORE(r13)
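        /*
         * In C terms, this is approximately:
         *      ... stores of all our vcpu state ...
         *      smp_wmb();                      // the lwsync noted above
         *      paca->kvm_hstate.kvm_vcore = NULL;
         */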
        /*
         * All secondaries exiting guest will fall through this path.
         * Before proceeding, just check for HMI interrupt and
         * invoke opal hmi handler.  By now we are sure that the
         * primary thread on this core/subcore has already made the
         * partition switch/TB resync and we are good to call the opal
         * hmi handler.
         */
        cmpwi   r12, BOOK3S_INTERRUPT_HMI
        li      r3,0            /* NULL argument */
        bl      hmi_exception_realmode
        /*
         * At this point we have finished executing in the guest.
         * We need to wait for hwthread_req to become zero, since
         * we may not turn on the MMU while hwthread_req is non-zero.
         * While waiting we also need to check if we get given a vcpu to run.
         */
        lbz     r3, HSTATE_HWTHREAD_REQ(r13)
        li      r0, KVM_HWTHREAD_IN_KERNEL
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
        /* need to recheck hwthread_req after a barrier, to avoid race */
        lbz     r3, HSTATE_HWTHREAD_REQ(r13)
        /*
         * We jump to pnv_wakeup_loss, which will return to the caller
         * of power7_nap in the powernv cpu offline loop.  The value we
         * put in r3 becomes the return value for power7_nap.
         */
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
        ld      r5, HSTATE_KVM_VCORE(r13)
        ld      r3, HSTATE_SPLIT_MODE(r13)
        lbz     r0, KVM_SPLIT_DO_NAP(r3)
        b       kvm_secondary_got_guest

54:     li      r0, KVM_HWTHREAD_IN_KVM
        stb     r0, HSTATE_HWTHREAD_STATE(r13)

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
        /*
         * When secondaries are napping in kvm_unsplit_nap() with
         * hwthread_req = 1, HMIs go ignored even though the subcores
         * have already exited the guest.  The HMI then keeps waking the
         * secondaries from nap in a loop, and they always go back to
         * nap since no vcore is assigned to them.  This makes it
         * impossible for the primary thread to get hold of the
         * secondary threads, resulting in a soft lockup in the KVM path.
         *
         * Let us check if HMI is pending and handle it before we go to nap.
         */
        cmpwi   r12, BOOK3S_INTERRUPT_HMI
        li      r3, 0           /* NULL argument */
        bl      hmi_exception_realmode
        /*
         * Ensure that secondary doesn't nap when it has
         * its vcore pointer set.
         */
        sync            /* matches smp_mb() before setting split_info.do_nap */
        ld      r0, HSTATE_KVM_VCORE(r13)
        /* clear any pending message */
        lis     r6, (PPC_DBELL_SERVER << (63-36))@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        /* Set kvm_split_mode.napped[tid] = 1 */
        ld      r3, HSTATE_SPLIT_MODE(r13)
        lhz     r4, PACAPACAINDEX(r13)
        clrldi  r4, r4, 61      /* micro-threading => P8 => 8 threads/core */
        addi    r4, r4, KVM_SPLIT_NAPPED
        /* Check the do_nap flag again after setting napped[] */
        lbz     r0, KVM_SPLIT_DO_NAP(r3)
        li      r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
        rlwimi  r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
        std     r0, HSTATE_SCRATCH0(r13)
        ld      r0, HSTATE_SCRATCH0(r13)

/******************************************************************************
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:
        /*
         * R4 = vcpu pointer (or NULL)
         * all other volatile GPRS = free
         */
        std     r0, PPC_LR_STKOFF(r1)

        /* Save R1 in the PACA */
        std     r1, HSTATE_HOST_R1(r13)

        li      r6, KVM_GUEST_MODE_HOST_HV
        stb     r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        /* Store initial timestamp */
        addi    r3, r4, VCPU_TB_RMENTRY
        bl      kvmhv_start_timing

        /*
         * POWER7/POWER8 host -> guest partition switch code.
         * We don't have to lock against concurrent tlbies,
         * but we do have to coordinate across hardware threads.
         */
        /* Set bit in entry map iff exit map is zero. */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r6, HSTATE_PTID(r13)
        addi    r9, r5, VCORE_ENTRY_EXIT
        cmpwi   r3, 0x100               /* any threads starting to exit? */
        bge     secondary_too_late      /* if so we're too late to the party */
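        /*
         * VCORE_ENTRY_EXIT packs two bitmaps into one word: the entry
         * map in the low byte and the exit map in the byte above it, so
         * a value >= 0x100 means at least one thread has begun exiting.
         */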
        /* Primary thread switches to guest partition. */
        ld      r9,VCORE_KVM(r5)        /* pointer to struct kvm */
        li      r0,LPID_RSVD            /* switch to reserved LPID */
        mtspr   SPRN_SDR1,r6            /* switch to partition page table */

        /* See if we need to flush the TLB */
        lhz     r6,PACAPACAINDEX(r13)   /* test_bit(cpu, need_tlb_flush) */
        clrldi  r7,r6,64-6              /* extract bit number (6 bits) */
        srdi    r6,r6,6                 /* doubleword number */
        sldi    r6,r6,3                 /* address offset */
        addi    r6,r6,KVM_NEED_FLUSH    /* dword in kvm->arch.need_tlb_flush */
23:     ldarx   r7,0,r6                 /* if set, clear the bit */

        /* Flush the TLB of any entries for this LPID */
        /* use arch 2.07S as a proxy for POWER8 */
        li      r6,512                  /* POWER8 has 512 sets */
        li      r6,128                  /* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
        li      r7,0x800                /* IS field = 0b10 */

        /* Add timebase offset onto timebase */
22:     ld      r8,VCORE_TB_OFFSET(r5)
        mftb    r6                      /* current host timebase */
        mtspr   SPRN_TBU40,r8           /* update upper 40 bits */
        mftb    r7                      /* check if lower 24 bits overflowed */
        addis   r8,r8,0x100             /* if so, increment upper 40 bits */
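        /*
         * Writing TBU40 replaces the upper 40 bits of the timebase.  If
         * the low 24 bits wrapped between the two mftb reads, the addis
         * (+0x100 in the high halfword, i.e. 1 << 24) bumps the 40-bit
         * field by one to compensate.
         */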
        /* Load guest PCR value to select appropriate compat mode */
37:     ld      r7, VCORE_PCR(r5)

        /* DPDES and VTB are shared between threads */
        ld      r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

        /* Mark the subcore state as inside guest */
        bl      kvmppc_subcore_enter_guest
        ld      r5, HSTATE_KVM_VCORE(r13)
        ld      r4, HSTATE_KVM_VCPU(r13)
        stb     r0,VCORE_IN_GUEST(r5)   /* signal secondaries to continue */

        /* Do we have a guest vcpu to run? */
        beq     kvmppc_primary_no_guest

        /* Load up guest SLB entries */
        lwz     r5,VCPU_SLB_MAX(r4)
1:      ld      r8,VCPU_SLB_E(r6)
        addi    r6,r6,VCPU_SLB_SIZE

        /* Increment yield count if they have a VPA */
        li      r6, LPPACA_YIELDCOUNT
        stb     r6, VCPU_VPA_DIRTY(r4)

        /* Save purr/spurr */
        std     r5,HSTATE_PURR(r13)
        std     r6,HSTATE_SPURR(r13)

        /* Set partition DABR */
        /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
        lwz     r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFSET(CPU_FTR_TM)

        /* Load guest PMU registers */
        /* R4 is live here (vcpu pointer) */
        sldi    r3, r3, 31              /* MMCR0_FC (freeze counters) bit */
        mtspr   SPRN_MMCR0, r3          /* freeze all counters, disable ints */
        andi.   r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
        lwz     r3, VCPU_PMC(r4)        /* always load up guest PMU registers */
        lwz     r5, VCPU_PMC + 4(r4)    /* to prevent information leak */
        lwz     r6, VCPU_PMC + 8(r4)
        lwz     r7, VCPU_PMC + 12(r4)
        lwz     r8, VCPU_PMC + 16(r4)
        lwz     r9, VCPU_PMC + 20(r4)
        ld      r5, VCPU_MMCR + 8(r4)
        ld      r6, VCPU_MMCR + 16(r4)
        ld      r5, VCPU_MMCR + 24(r4)
        lwz     r7, VCPU_PMC + 24(r4)
        lwz     r8, VCPU_PMC + 28(r4)
        ld      r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

        /* Load up FP, VMX and VSX registers */
        ld      r14, VCPU_GPR(R14)(r4)
        ld      r15, VCPU_GPR(R15)(r4)
        ld      r16, VCPU_GPR(R16)(r4)
        ld      r17, VCPU_GPR(R17)(r4)
        ld      r18, VCPU_GPR(R18)(r4)
        ld      r19, VCPU_GPR(R19)(r4)
        ld      r20, VCPU_GPR(R20)(r4)
        ld      r21, VCPU_GPR(R21)(r4)
        ld      r22, VCPU_GPR(R22)(r4)
        ld      r23, VCPU_GPR(R23)(r4)
        ld      r24, VCPU_GPR(R24)(r4)
        ld      r25, VCPU_GPR(R25)(r4)
        ld      r26, VCPU_GPR(R26)(r4)
        ld      r27, VCPU_GPR(R27)(r4)
        ld      r28, VCPU_GPR(R28)(r4)
        ld      r29, VCPU_GPR(R29)(r4)
        ld      r30, VCPU_GPR(R30)(r4)
        ld      r31, VCPU_GPR(R31)(r4)

        /* Switch DSCR to guest value */

        /* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        /* Load up POWER8-specific registers */
        lwz     r6, VCPU_PSPB(r4)
        ld      r6, VCPU_DAWRX(r4)
        ld      r7, VCPU_CIABR(r4)
        ld      r8, VCPU_EBBHR(r4)
        ld      r5, VCPU_EBBRR(r4)
        ld      r6, VCPU_BESCR(r4)
        ld      r7, VCPU_CSIGR(r4)
        ld      r5, VCPU_TCSCR(r4)
        lwz     r7, VCPU_GUEST_PID(r4)

        /*
         * Set the decrementer to the guest decrementer.
         */
        ld      r8,VCPU_DEC_EXPIRES(r4)
        /* r8 is a host timebase value here, convert to guest TB */
        ld      r5,HSTATE_KVM_VCORE(r13)
        ld      r6,VCORE_TB_OFFSET(r5)

        ld      r5, VCPU_SPRG0(r4)
        ld      r6, VCPU_SPRG1(r4)
        ld      r7, VCPU_SPRG2(r4)
        ld      r8, VCPU_SPRG3(r4)

        /* Load up DAR and DSISR */
        lwz     r6, VCPU_DSISR(r4)

        /* Restore AMR and UAMOR, set AMOR to all 1s */

        /* Restore state of CTRL run bit; assume 1 on entry */

        /* Secondary threads wait for primary to have done partition switch */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r6, HSTATE_PTID(r13)
        lbz     r0, VCORE_IN_GUEST(r5)
20:     lwz     r3, VCORE_ENTRY_EXIT(r5)
        lbz     r0, VCORE_IN_GUEST(r5)

        /* Check if HDEC expires soon */
        cmpwi   r3, 512         /* 1 microsecond */

deliver_guest_interrupt:
kvmppc_cede_reentry:            /* r4 = vcpu, r13 = paca */
        /* r11 = vcpu->arch.msr & ~MSR_HV */
        rldicl  r11, r11, 63 - MSR_HV_LG, 1
        rotldi  r11, r11, 1 + MSR_HV_LG
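        /*
         * The rotate pair clears MSR_HV without needing a mask
         * register: rotate left so HV becomes the top bit, clear it
         * with the rldicl mask, then rotate back into place.
         */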
        /* Check if we can deliver an external or decrementer interrupt now */
        ld      r0, VCPU_PENDING_EXC(r4)
        rldicl  r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
        andi.   r8, r11, MSR_EE
        /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
        rldimi  r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
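        /*
         * LPCR[MER] (mediated external interrupt request) makes the
         * hardware present an external interrupt to the guest as soon
         * as the guest sets MSR[EE], so the pending event is not lost.
         */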
        li      r0, BOOK3S_INTERRUPT_EXTERNAL
        li      r0, BOOK3S_INTERRUPT_DECREMENTER
12:     mtspr   SPRN_SRR0, r10
        bl      kvmppc_msr_interrupt

        /*
         * R10: value for HSRR0
         * R11: value for HSRR1
         */
        stb     r0,VCPU_CEDED(r4)       /* cancel cede */

        /* Activate guest mode, so faults get handled by KVM */
        li      r9, KVM_GUEST_MODE_GUEST_HV
        stb     r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        /* Accumulate timing */
        addi    r3, r4, VCPU_TB_GUEST
        bl      kvmhv_accumulate_time

END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ld      r1, VCPU_GPR(R1)(r4)
        ld      r2, VCPU_GPR(R2)(r4)
        ld      r3, VCPU_GPR(R3)(r4)
        ld      r5, VCPU_GPR(R5)(r4)
        ld      r6, VCPU_GPR(R6)(r4)
        ld      r7, VCPU_GPR(R7)(r4)
        ld      r8, VCPU_GPR(R8)(r4)
        ld      r9, VCPU_GPR(R9)(r4)
        ld      r10, VCPU_GPR(R10)(r4)
        ld      r11, VCPU_GPR(R11)(r4)
        ld      r12, VCPU_GPR(R12)(r4)
        ld      r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ld      r0, VCPU_GPR(R0)(r4)
        ld      r4, VCPU_GPR(R4)(r4)

        stw     r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMEXIT
        bl      kvmhv_accumulate_time
11:     b       kvmhv_switch_to_host

        li      r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:     stw     r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMEXIT
        bl      kvmhv_accumulate_time

/******************************************************************************
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
        .globl  kvmppc_interrupt_hv
kvmppc_interrupt_hv:
        /*
         * Register contents:
         * R12          = interrupt vector
         * guest CR, R12 saved in shadow VCPU SCRATCH1/0
         * guest R13 saved in SPRN_SCRATCH0
         */
        std     r9, HSTATE_SCRATCH2(r13)
        lbz     r9, HSTATE_IN_GUEST(r13)
        cmpwi   r9, KVM_GUEST_MODE_HOST_HV
        beq     kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        cmpwi   r9, KVM_GUEST_MODE_GUEST
        ld      r9, HSTATE_SCRATCH2(r13)
        beq     kvmppc_interrupt_pr

        /* We're now back in the host but in guest MMU context */
        li      r9, KVM_GUEST_MODE_HOST_HV
        stb     r9, HSTATE_IN_GUEST(r13)

        ld      r9, HSTATE_KVM_VCPU(r13)

        /* Save registers */
        std     r0, VCPU_GPR(R0)(r9)
        std     r1, VCPU_GPR(R1)(r9)
        std     r2, VCPU_GPR(R2)(r9)
        std     r3, VCPU_GPR(R3)(r9)
        std     r4, VCPU_GPR(R4)(r9)
        std     r5, VCPU_GPR(R5)(r9)
        std     r6, VCPU_GPR(R6)(r9)
        std     r7, VCPU_GPR(R7)(r9)
        std     r8, VCPU_GPR(R8)(r9)
        ld      r0, HSTATE_SCRATCH2(r13)
        std     r0, VCPU_GPR(R9)(r9)
        std     r10, VCPU_GPR(R10)(r9)
        std     r11, VCPU_GPR(R11)(r9)
        ld      r3, HSTATE_SCRATCH0(r13)
        lwz     r4, HSTATE_SCRATCH1(r13)
        std     r3, VCPU_GPR(R12)(r9)
        ld      r3, HSTATE_CFAR(r13)
        std     r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        ld      r4, HSTATE_PPR(r13)
        std     r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        /* Restore R1/R2 so we can handle faults */
        ld      r1, HSTATE_HOST_R1(r13)

        mfspr   r10, SPRN_SRR0
        mfspr   r11, SPRN_SRR1
        std     r10, VCPU_SRR0(r9)
        std     r11, VCPU_SRR1(r9)
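        /*
         * The first-level handlers flag interrupts that use HSRR0/1
         * rather than SRR0/1 by setting bit 1 of the trap number in
         * r12; that bit is cleared again before the trap number is
         * compared against the BOOK3S_INTERRUPT_* values.
         */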
        andi.   r0, r12, 2              /* need to read HSRR0/1? */
        mfspr   r10, SPRN_HSRR0
        mfspr   r11, SPRN_HSRR1
1:      std     r10, VCPU_PC(r9)
        std     r11, VCPU_MSR(r9)

        std     r3, VCPU_GPR(R13)(r9)

        stw     r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r9, VCPU_TB_RMINTR
        bl      kvmhv_accumulate_time
        ld      r5, VCPU_GPR(R5)(r9)
        ld      r6, VCPU_GPR(R6)(r9)
        ld      r7, VCPU_GPR(R7)(r9)
        ld      r8, VCPU_GPR(R8)(r9)

        /*
         * Save HEIR (HV emulation assist reg) in emul_inst
         * if this is an HEI (HV emulation interrupt, e40)
         */
        li      r3,KVM_INST_FETCH_FAILED
        stw     r3,VCPU_LAST_INST(r9)
        cmpwi   r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:     stw     r3,VCPU_HEIR(r9)

        /* these are volatile across C function calls */
        std     r3, VCPU_CTR(r9)
        std     r4, VCPU_XER(r9)

        /* If this is a page table miss then see if it's theirs or ours */
        cmpwi   r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
        cmpwi   r12, BOOK3S_INTERRUPT_H_INST_STORAGE

        /* See if this is a leftover HDEC interrupt */
        cmpwi   r12,BOOK3S_INTERRUPT_HV_DECREMENTER
        bge     fast_guest_return

        /* See if this is an hcall we can handle in real mode */
        cmpwi   r12,BOOK3S_INTERRUPT_SYSCALL
        beq     hcall_try_real_mode

        /* Hypervisor doorbell - exit only if host IPI flag set */
        cmpwi   r12, BOOK3S_INTERRUPT_H_DOORBELL
        lbz     r0, HSTATE_HOST_IPI(r13)

        /* External interrupt ? */
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        bne+    guest_exit_cont

        /*
         * External interrupt, first check for host_ipi.  If this is
         * set, we know the host wants us out so let's do it now.
         */

        /*
         * Restore the active volatile registers after returning from
         * a C function.
         */
        ld      r9, HSTATE_KVM_VCPU(r13)
        li      r12, BOOK3S_INTERRUPT_EXTERNAL

        /*
         * kvmppc_read_intr return codes:
         *
         * Exit to host (r3 > 0)
         *   1 An interrupt is pending that needs to be handled by the host
         *     Exit guest and return to host by branching to guest_exit_cont
         *
         *   2 Passthrough that needs completion in the host
         *     Exit guest and return to host by branching to guest_exit_cont
         *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
         *     to indicate to the host to complete handling the interrupt
         *
         * Before returning to guest, we check if any CPU is heading out
         * to the host and if so, we head out also.  If no CPUs are
         * heading out, we handle the return values <= 0 below.
         *
         * Return to guest (r3 <= 0)
         *   0 No external interrupt is pending
         *  -1 A guest wakeup IPI (which has now been cleared)
         *     In either case, we return to guest to deliver any pending
         *     guest interrupts.
         *
         *  -2 A PCI passthrough external interrupt was handled
         *     (interrupt was delivered directly to guest)
         *     Return to guest to deliver any pending guest interrupts.
         */

        /* Return code = 2 */
        li      r12, BOOK3S_INTERRUPT_HV_RM_HARD
        stw     r12, VCPU_TRAP(r9)

1:      /* Return code <= 1 */

        /* Return code <= 0 */
4:      ld      r5, HSTATE_KVM_VCORE(r13)
        lwz     r0, VCORE_ENTRY_EXIT(r5)
        blt     deliver_guest_interrupt
guest_exit_cont:                /* r9 = vcpu, r12 = trap, r13 = paca */
        /* Save more register state */
        std     r6, VCPU_DAR(r9)
        stw     r7, VCPU_DSISR(r9)
        /* don't overwrite fault_dar/fault_dsisr if HDSI */
        cmpwi   r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
        std     r6, VCPU_FAULT_DAR(r9)
        stw     r7, VCPU_FAULT_DSISR(r9)

        /* See if it is a machine check */
        cmpwi   r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        beq     machine_check_realmode

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r9, VCPU_TB_RMEXIT
        bl      kvmhv_accumulate_time

        /* Increment exit count, poke other threads to exit */
        bl      kvmhv_commence_exit
        ld      r9, HSTATE_KVM_VCPU(r13)
        lwz     r12, VCPU_TRAP(r9)

        /* Stop others sending VCPU interrupts to this physical CPU */
        stw     r0, VCPU_CPU(r9)
        stw     r0, VCPU_THREAD_CPU(r9)

        /* Save guest CTRL register, set runlatch to 1 */
        stw     r6,VCPU_CTRL(r9)

        /* Read the guest SLB and save it away */
        lwz     r0,VCPU_SLB_NR(r9)      /* number of entries in SLB */
        andis.  r0,r8,SLB_ESID_V@h
        add     r8,r8,r6                /* put index in */
        std     r8,VCPU_SLB_E(r7)
        std     r3,VCPU_SLB_V(r7)
        addi    r7,r7,VCPU_SLB_SIZE
        stw     r5,VCPU_SLB_MAX(r9)

        /*
         * Save the guest PURR/SPURR
         */
        ld      r8,VCPU_SPURR(r9)
        std     r5,VCPU_PURR(r9)
        std     r6,VCPU_SPURR(r9)
        /*
         * Restore host PURR/SPURR and add guest times
         * so that the time in the guest gets accounted.
         */
        ld      r3,HSTATE_PURR(r13)
        ld      r4,HSTATE_SPURR(r13)

        /* r5 is a guest timebase value here, convert to host TB */
        ld      r3,HSTATE_KVM_VCORE(r13)
        ld      r4,VCORE_TB_OFFSET(r3)
        std     r5,VCPU_DEC_EXPIRES(r9)

END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        /* Save POWER8-specific registers */
        std     r5, VCPU_IAMR(r9)
        stw     r6, VCPU_PSPB(r9)
        std     r7, VCPU_FSCR(r9)
        std     r7, VCPU_TAR(r9)
        mfspr   r8, SPRN_EBBHR
        std     r8, VCPU_EBBHR(r9)
        mfspr   r5, SPRN_EBBRR
        mfspr   r6, SPRN_BESCR
        mfspr   r7, SPRN_CSIGR
        std     r5, VCPU_EBBRR(r9)
        std     r6, VCPU_BESCR(r9)
        std     r7, VCPU_CSIGR(r9)
        std     r8, VCPU_TACR(r9)
        mfspr   r5, SPRN_TCSCR
        std     r5, VCPU_TCSCR(r9)
        std     r6, VCPU_ACOP(r9)
        stw     r7, VCPU_GUEST_PID(r9)
        std     r8, VCPU_WORT(r9)
        /*
         * Restore various registers to 0, where non-zero values
         * set by the guest could disrupt the host.
         */
        mtspr   SPRN_CIABR, r0
        mtspr   SPRN_DAWRX, r0
        mtspr   SPRN_TCSCR, r0
        /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
        mtspr   SPRN_MMCRS, r0

        /* Save and reset AMR and UAMOR before turning on the MMU */
        std     r6,VCPU_UAMOR(r9)

        /* Switch DSCR back to host value */
        ld      r7, HSTATE_DSCR(r13)
        std     r8, VCPU_DSCR(r9)

        /* Save non-volatile GPRs */
        std     r14, VCPU_GPR(R14)(r9)
        std     r15, VCPU_GPR(R15)(r9)
        std     r16, VCPU_GPR(R16)(r9)
        std     r17, VCPU_GPR(R17)(r9)
        std     r18, VCPU_GPR(R18)(r9)
        std     r19, VCPU_GPR(R19)(r9)
        std     r20, VCPU_GPR(R20)(r9)
        std     r21, VCPU_GPR(R21)(r9)
        std     r22, VCPU_GPR(R22)(r9)
        std     r23, VCPU_GPR(R23)(r9)
        std     r24, VCPU_GPR(R24)(r9)
        std     r25, VCPU_GPR(R25)(r9)
        std     r26, VCPU_GPR(R26)(r9)
        std     r27, VCPU_GPR(R27)(r9)
        std     r28, VCPU_GPR(R28)(r9)
        std     r29, VCPU_GPR(R29)(r9)
        std     r30, VCPU_GPR(R30)(r9)
        std     r31, VCPU_GPR(R31)(r9)

        mfspr   r3, SPRN_SPRG0
        mfspr   r4, SPRN_SPRG1
        mfspr   r5, SPRN_SPRG2
        mfspr   r6, SPRN_SPRG3
        std     r3, VCPU_SPRG0(r9)
        std     r4, VCPU_SPRG1(r9)
        std     r5, VCPU_SPRG2(r9)
        std     r6, VCPU_SPRG3(r9)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFSET(CPU_FTR_TM)

        /* Increment yield count if they have a VPA */
        ld      r8, VCPU_VPA(r9)        /* do they have a VPA? */
        li      r4, LPPACA_YIELDCOUNT
        stb     r3, VCPU_VPA_DIRTY(r9)

        /* Save PMU registers if requested */
        /* r8 and cr0.eq are live here */
        /*
         * POWER8 seems to have a hardware bug where setting
         * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
         * when some counters are already negative doesn't seem
         * to cause a performance monitor alert (and hence interrupt).
         * The effect of this is that when saving the PMU state,
         * if there is no PMU alert pending when we read MMCR0
         * before freezing the counters, but one becomes pending
         * before we read the counters, we lose it.
         * To work around this, we need a way to freeze the counters
         * before reading MMCR0.  Normally, freezing the counters
         * is done by writing MMCR0 (to set MMCR0[FC]) which
         * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
         * we can also freeze the counters using MMCR2, by writing
         * 1s to all the counter freeze condition bits (there are
         * 9 bits each for 6 counters).
         */
        li      r3, -1                  /* set all freeze bits */
        mfspr   r10, SPRN_MMCR2
        mtspr   SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        sldi    r3, r3, 31              /* MMCR0_FC (freeze counters) bit */
        mfspr   r4, SPRN_MMCR0          /* save MMCR0 */
        mtspr   SPRN_MMCR0, r3          /* freeze all counters, disable ints */
        mfspr   r6, SPRN_MMCRA
        /* Clear MMCRA in order to disable SDAR updates */
        mtspr   SPRN_MMCRA, r7
        beq     21f                     /* if no VPA, save PMU stuff anyway */
        lbz     r7, LPPACA_PMCINUSE(r8)
        cmpwi   r7, 0                   /* did they ask for PMU stuff to be saved? */
        std     r3, VCPU_MMCR(r9)       /* if not, set saved MMCR0 to FC */
21:     mfspr   r5, SPRN_MMCR1
        std     r4, VCPU_MMCR(r9)
        std     r5, VCPU_MMCR + 8(r9)
        std     r6, VCPU_MMCR + 16(r9)
        std     r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        std     r7, VCPU_SIAR(r9)
        std     r8, VCPU_SDAR(r9)
        stw     r3, VCPU_PMC(r9)
        stw     r4, VCPU_PMC + 4(r9)
        stw     r5, VCPU_PMC + 8(r9)
        stw     r6, VCPU_PMC + 12(r9)
        stw     r7, VCPU_PMC + 16(r9)
        stw     r8, VCPU_PMC + 20(r9)
        mfspr   r6, SPRN_SPMC1
        mfspr   r7, SPRN_SPMC2
        mfspr   r8, SPRN_MMCRS
        std     r5, VCPU_SIER(r9)
        stw     r6, VCPU_PMC + 24(r9)
        stw     r7, VCPU_PMC + 28(r9)
        std     r8, VCPU_MMCR + 32(r9)
        mtspr   SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/*
 * POWER7/POWER8 guest -> host partition switch code.
 * We don't have to lock against tlbies but we do
 * have to coordinate the hardware threads.
 */
kvmhv_switch_to_host:
        /* Secondary threads wait for primary to do partition switch */
        ld      r5,HSTATE_KVM_VCORE(r13)
        ld      r4,VCORE_KVM(r5)        /* pointer to struct kvm */
        lbz     r3,HSTATE_PTID(r13)
13:     lbz     r3,VCORE_IN_GUEST(r5)

        /* Primary thread waits for all the secondaries to exit guest */
15:     lwz     r3,VCORE_ENTRY_EXIT(r5)
        rlwinm  r0,r3,32-8,0xff

        /* Did we actually switch to the guest at all? */
        lbz     r6, VCORE_IN_GUEST(r5)

        /* Primary thread switches back to host partition */
        ld      r6,KVM_HOST_SDR1(r4)
        lwz     r7,KVM_HOST_LPID(r4)
        li      r8,LPID_RSVD            /* switch to reserved LPID */
        mtspr   SPRN_SDR1,r6            /* switch to partition page table */

        /* DPDES and VTB are shared between threads */
        mfspr   r7, SPRN_DPDES
        std     r7, VCORE_DPDES(r5)
        std     r8, VCORE_VTB(r5)
        /* clear DPDES so we don't get guest doorbells in the host */
        mtspr   SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

        /* If HMI, call kvmppc_realmode_hmi_handler() */
        cmpwi   r12, BOOK3S_INTERRUPT_HMI
        bl      kvmppc_realmode_hmi_handler
        li      r12, BOOK3S_INTERRUPT_HMI
        /*
         * At this point kvmppc_realmode_hmi_handler has already
         * resynced the TB, so there is no need to subtract the guest
         * timebase offset from the timebase; skip that step.
         *
         * Also, do not call kvmppc_subcore_exit_guest() because it has
         * already been invoked as part of kvmppc_realmode_hmi_handler().
         */

        /* Subtract timebase offset from timebase */
        ld      r8,VCORE_TB_OFFSET(r5)
        mftb    r6                      /* current guest timebase */
        mtspr   SPRN_TBU40,r8           /* update upper 40 bits */
        mftb    r7                      /* check if lower 24 bits overflowed */
        addis   r8,r8,0x100             /* if so, increment upper 40 bits */

17:     bl      kvmppc_subcore_exit_guest
30:     ld      r5,HSTATE_KVM_VCORE(r13)
        ld      r4,VCORE_KVM(r5)        /* pointer to struct kvm */

        ld      r0, VCORE_PCR(r5)

        /* Signal secondary CPUs to continue */
        stb     r0,VCORE_IN_GUEST(r5)
19:     lis     r8,0x7fff               /* MAX_INT@h */
16:     ld      r8,KVM_HOST_LPCR(r4)

        /* load host SLB entries */
        ld      r8,PACA_SLBSHADOWPTR(r13)
        .rept   SLB_NUM_BOLTED
        li      r3, SLBSHADOW_SAVEAREA
        andis.  r7,r5,SLB_ESID_V@h

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        /* Finish timing, if we have a vcpu */
        ld      r4, HSTATE_KVM_VCPU(r13)
        bl      kvmhv_accumulate_time

        /* Unset guest mode */
        li      r0, KVM_GUEST_MODE_NONE
        stb     r0, HSTATE_IN_GUEST(r13)

        ld      r0, 112+PPC_LR_STKOFF(r1)

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
        mfspr   r6, SPRN_HDSISR
        /* HPTE not found fault or protection fault? */
        andis.  r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
        beq     1f                      /* if not, send it to the guest */
        andi.   r0, r11, MSR_DR         /* data relocation enabled? */
        PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
        li      r0, BOOK3S_INTERRUPT_DATA_SEGMENT
        bne     7f                      /* if no SLB entry found */
4:      std     r4, VCPU_FAULT_DAR(r9)
        stw     r6, VCPU_FAULT_DSISR(r9)

        /* Search the hash table. */
        mr      r3, r9                  /* vcpu pointer */
        li      r7, 1                   /* data fault */
        bl      kvmppc_hpte_hv_fault
        ld      r9, HSTATE_KVM_VCPU(r13)
        ld      r11, VCPU_MSR(r9)
        li      r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
        cmpdi   r3, 0                   /* retry the instruction */
        cmpdi   r3, -1                  /* handle in kernel mode */
        cmpdi   r3, -2                  /* MMIO emulation; need instr word */

        /* Synthesize a DSI (or DSegI) for the guest */
        ld      r4, VCPU_FAULT_DAR(r9)
1:      li      r0, BOOK3S_INTERRUPT_DATA_STORAGE
        mtspr   SPRN_DSISR, r6
7:      mtspr   SPRN_DAR, r4
        mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        bl      kvmppc_msr_interrupt
fast_interrupt_c_return:
6:      ld      r7, VCPU_CTR(r9)

3:      ld      r5, VCPU_KVM(r9)        /* not relocated, use VRMA */
        ld      r5, KVM_VRMA_SLB_V(r5)

        /* If this is for emulated MMIO, load the instruction word */
2:      li      r8, KVM_INST_FETCH_FAILED       /* In case lwz faults */

        /*
         * Set guest mode to 'jump over instruction' so if lwz faults
         * we'll just continue at the next IP.
         */
        li      r0, KVM_GUEST_MODE_SKIP
        stb     r0, HSTATE_IN_GUEST(r13)

        /* Do the access with MSR:DR enabled */
        ori     r4, r3, MSR_DR          /* Enable paging for data */

        /* Store the result */
        stw     r8, VCPU_LAST_INST(r9)

        /* Unset guest mode. */
        li      r0, KVM_GUEST_MODE_HOST_HV
        stb     r0, HSTATE_IN_GUEST(r13)

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
        andis.  r0, r11, SRR1_ISI_NOPT@h
        andi.   r0, r11, MSR_IR         /* instruction relocation enabled? */
        PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
        li      r0, BOOK3S_INTERRUPT_INST_SEGMENT
        bne     7f                      /* if no SLB entry found */

        /* Search the hash table. */
        mr      r3, r9                  /* vcpu pointer */
        li      r7, 0                   /* instruction fault */
        bl      kvmppc_hpte_hv_fault
        ld      r9, HSTATE_KVM_VCPU(r13)
        ld      r11, VCPU_MSR(r9)
        li      r12, BOOK3S_INTERRUPT_H_INST_STORAGE
        cmpdi   r3, 0                   /* retry the instruction */
        beq     fast_interrupt_c_return
        cmpdi   r3, -1                  /* handle in kernel mode */

        /* Synthesize an ISI (or ISegI) for the guest */
1:      li      r0, BOOK3S_INTERRUPT_INST_STORAGE
7:      mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        bl      kvmppc_msr_interrupt
        b       fast_interrupt_c_return

3:      ld      r6, VCPU_KVM(r9)        /* not relocated, use VRMA */
        ld      r5, KVM_VRMA_SLB_V(r6)

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
        ld      r3,VCPU_GPR(R3)(r9)
        /* sc 1 from userspace - reflect to guest syscall */
        bne     sc_1_fast_return
        cmpldi  r3,hcall_real_table_end - hcall_real_table
        /* See if this hcall is enabled for in-kernel handling */
        srdi    r0, r3, 8               /* r0 = (r3 / 4) >> 6 */
        sldi    r0, r0, 3               /* index into kvm->arch.enabled_hcalls[] */
        ld      r0, KVM_ENABLED_HCALLS(r4)
        rlwinm  r4, r3, 32-2, 0x3f      /* r4 = (r3 / 4) & 0x3f */
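        /*
         * C sketch of the enabled-hcall test (hcall numbers are
         * multiples of 4):
         *      idx  = hcall / 4;
         *      word = kvm->arch.enabled_hcalls[idx / 64];
         *      enabled = (word >> (idx & 0x3f)) & 1;
         */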
        /* Get pointer to handler, if any, and call it */
        LOAD_REG_ADDR(r4, hcall_real_table)
        mr      r3,r9           /* get vcpu pointer */
        ld      r4,VCPU_GPR(R4)(r9)
        beq     hcall_real_fallback
        ld      r4,HSTATE_KVM_VCPU(r13)
        std     r3,VCPU_GPR(R3)(r4)
        li      r10, BOOK3S_INTERRUPT_SYSCALL
        bl      kvmppc_msr_interrupt

        /*
         * We've attempted a real mode hcall, but the handler has punted
         * it back to userspace.  We need to restore some clobbered
         * volatiles before resuming the pass-it-to-qemu path.
         */
hcall_real_fallback:
        li      r12,BOOK3S_INTERRUPT_SYSCALL
        ld      r9, HSTATE_KVM_VCPU(r13)

        .globl  hcall_real_table
hcall_real_table:
        .long   0               /* 0 - unused */
        .long   DOTSYM(kvmppc_h_remove) - hcall_real_table
        .long   DOTSYM(kvmppc_h_enter) - hcall_real_table
        .long   DOTSYM(kvmppc_h_read) - hcall_real_table
        .long   DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
        .long   DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
        .long   DOTSYM(kvmppc_h_protect) - hcall_real_table
        .long   DOTSYM(kvmppc_h_get_tce) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
        .long   0               /* 0x24 - H_SET_SPRG0 */
        .long   DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
        .long   DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
        .long   0               /* 0x70 - H_IPOLL */
        .long   DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
        .long   0               /* 0x64 - H_EOI */
        .long   0               /* 0x68 - H_CPPR */
        .long   0               /* 0x6c - H_IPI */
        .long   0               /* 0x70 - H_IPOLL */
        .long   0               /* 0x74 - H_XIRR */
#endif
        .long   DOTSYM(kvmppc_h_cede) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
        .long   DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
        .long   DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
        .long   DOTSYM(kvmppc_h_random) - hcall_real_table
        .globl  hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
        andi.   r0, r5, DABRX_USER | DABRX_KERNEL
        li      r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
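        /*
         * Validation: at least one of DABRX_USER/DABRX_KERNEL must be
         * set, and no bits outside USER|KERNEL|BTI are accepted;
         * anything else falls through to the H_PARAMETER return below.
         */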
6:      li      r3, H_PARAMETER

_GLOBAL(kvmppc_h_set_dabr)
        li      r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        std     r4,VCPU_DABR(r3)
        stw     r5, VCPU_DABRX(r3)
        mtspr   SPRN_DABRX, r5
        /* Work around P7 bug where DABR can get corrupted on mtspr */
1:      mtspr   SPRN_DABR,r4

        /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:      rlwimi  r5, r4, 5, DAWRX_DR | DAWRX_DW
        rlwimi  r5, r4, 2, DAWRX_WT
        std     r4, VCPU_DAWR(r3)
        std     r5, VCPU_DAWRX(r3)
        mtspr   SPRN_DAWRX, r5

_GLOBAL(kvmppc_h_cede)          /* r3 = vcpu pointer, r11 = msr, r13 = paca */
        std     r11,VCPU_MSR(r3)
        stb     r0,VCPU_CEDED(r3)
        sync                    /* order setting ceded vs. testing prodded */
        lbz     r5,VCPU_PRODDED(r3)
        bne     kvm_cede_prodded
        li      r12,0           /* set trap to 0 to say hcall is handled */
        stw     r12,VCPU_TRAP(r3)
        std     r0,VCPU_GPR(R3)(r3)

        /*
         * Set our bit in the bitmask of napping threads unless all the
         * other threads are already napping, in which case we send this
         * up to the host.
         */
        ld      r5,HSTATE_KVM_VCORE(r13)
        lbz     r6,HSTATE_PTID(r13)
        lwz     r8,VCORE_ENTRY_EXIT(r5)
        addi    r6,r5,VCORE_NAPPING_THREADS
        /* order napping_threads update vs testing entry_exit_map */
        stb     r0,HSTATE_NAPPING(r13)
        lwz     r7,VCORE_ENTRY_EXIT(r5)
        bge     33f             /* another thread already exiting */

        /*
         * Although not specifically required by the architecture, POWER7
         * preserves the following registers in nap mode, even if an SMT mode
         * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
         * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
         */
        /* Save non-volatile GPRs */
        std     r14, VCPU_GPR(R14)(r3)
        std     r15, VCPU_GPR(R15)(r3)
        std     r16, VCPU_GPR(R16)(r3)
        std     r17, VCPU_GPR(R17)(r3)
        std     r18, VCPU_GPR(R18)(r3)
        std     r19, VCPU_GPR(R19)(r3)
        std     r20, VCPU_GPR(R20)(r3)
        std     r21, VCPU_GPR(R21)(r3)
        std     r22, VCPU_GPR(R22)(r3)
        std     r23, VCPU_GPR(R23)(r3)
        std     r24, VCPU_GPR(R24)(r3)
        std     r25, VCPU_GPR(R25)(r3)
        std     r26, VCPU_GPR(R26)(r3)
        std     r27, VCPU_GPR(R27)(r3)
        std     r28, VCPU_GPR(R28)(r3)
        std     r29, VCPU_GPR(R29)(r3)
        std     r30, VCPU_GPR(R30)(r3)
        std     r31, VCPU_GPR(R31)(r3)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        ld      r9, HSTATE_KVM_VCPU(r13)
END_FTR_SECTION_IFSET(CPU_FTR_TM)

        /*
         * Set DEC to the smaller of DEC and HDEC, so that we wake
         * no later than the end of our timeslice (HDEC interrupts
         * don't wake us from nap).
         */

        /* save expiry time of guest decrementer */
        ld      r4, HSTATE_KVM_VCPU(r13)
        ld      r5, HSTATE_KVM_VCORE(r13)
        ld      r6, VCORE_TB_OFFSET(r5)
        subf    r3, r6, r3      /* convert to host TB value */
        std     r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        ld      r4, HSTATE_KVM_VCPU(r13)
        addi    r3, r4, VCPU_TB_CEDE
        bl      kvmhv_accumulate_time

        lis     r3, LPCR_PECEDP@h       /* Do wake on privileged doorbell */

        /*
         * Take a nap until a decrementer or external or doorbell
         * interrupt occurs, with PECE1 and PECE0 set in LPCR.
         * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
         * Also clear the runlatch bit before napping.
         */
        mfspr   r0, SPRN_CTRLF
        mtspr   SPRN_CTRLT, r0
        stb     r0,HSTATE_HWTHREAD_REQ(r13)
        ori     r5,r5,LPCR_PECE0 | LPCR_PECE1
        ori     r5, r5, LPCR_PECEDH
        rlwimi  r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        std     r0, HSTATE_SCRATCH0(r13)
        ld      r0, HSTATE_SCRATCH0(r13)

        /* get vcpu pointer */
        ld      r4, HSTATE_KVM_VCPU(r13)

        /* Woken by external or decrementer interrupt */
        ld      r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMINTR
        bl      kvmhv_accumulate_time

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        bl      kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)

        /* load up FP state */

        /* Restore guest decrementer */
        ld      r3, VCPU_DEC_EXPIRES(r4)
        ld      r5, HSTATE_KVM_VCORE(r13)
        ld      r6, VCORE_TB_OFFSET(r5)
        add     r3, r3, r6      /* convert host TB to guest TB value */

        ld      r14, VCPU_GPR(R14)(r4)
        ld      r15, VCPU_GPR(R15)(r4)
        ld      r16, VCPU_GPR(R16)(r4)
        ld      r17, VCPU_GPR(R17)(r4)
        ld      r18, VCPU_GPR(R18)(r4)
        ld      r19, VCPU_GPR(R19)(r4)
        ld      r20, VCPU_GPR(R20)(r4)
        ld      r21, VCPU_GPR(R21)(r4)
        ld      r22, VCPU_GPR(R22)(r4)
        ld      r23, VCPU_GPR(R23)(r4)
        ld      r24, VCPU_GPR(R24)(r4)
        ld      r25, VCPU_GPR(R25)(r4)
        ld      r26, VCPU_GPR(R26)(r4)
        ld      r27, VCPU_GPR(R27)(r4)
        ld      r28, VCPU_GPR(R28)(r4)
        ld      r29, VCPU_GPR(R29)(r4)
        ld      r30, VCPU_GPR(R30)(r4)
        ld      r31, VCPU_GPR(R31)(r4)

        /* Check the wake reason in SRR1 to see why we got here */
        bl      kvmppc_check_wake_reason

        /*
         * Restore volatile registers since we could have called a
         * C routine in kvmppc_check_wake_reason.
         *      r3 tells us whether we need to return to host or not
         * WARNING: it gets checked further down;
         * do not modify r3 until this check is done.
         */
        ld      r4, HSTATE_KVM_VCPU(r13)

        /* clear our bit in vcore->napping_threads */
34:     ld      r5,HSTATE_KVM_VCORE(r13)
        lbz     r7,HSTATE_PTID(r13)
        addi    r6,r5,VCORE_NAPPING_THREADS
        stb     r0,HSTATE_NAPPING(r13)

        /* See if the wake reason saved in r3 means we need to exit */
        stw     r12, VCPU_TRAP(r4)

        /* see if any other thread is already exiting */
        lwz     r0,VCORE_ENTRY_EXIT(r5)
        b       kvmppc_cede_reentry     /* if not go back to guest */

        /* cede when already previously prodded case */
        stb     r0,VCPU_PRODDED(r3)
        sync                    /* order testing prodded vs. clearing ceded */
        stb     r0,VCPU_CEDED(r3)

        /* we've ceded but we want to give control to the host */
        ld      r9, HSTATE_KVM_VCPU(r13)

        /* Try to handle a machine check in real mode */
machine_check_realmode:
        mr      r3, r9          /* get vcpu pointer */
        bl      kvmppc_realmode_machine_check
        ld      r9, HSTATE_KVM_VCPU(r13)
        li      r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        /*
         * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
         * through a machine check interrupt (set HSRR0 to 0x200).
         * For handled (non-fatal) errors, just go back to guest
         * execution with the current HSRR0 instead of exiting the
         * guest.  This approach injects the machine check into the
         * guest for fatal errors, causing the guest to crash.
         *
         * The old code used to return to the host for unhandled errors,
         * which caused the guest to hang with soft lockups inside the
         * guest and made it difficult to recover the guest instance.
         *
         * If we receive a machine check with MSR(RI=0), deliver it to
         * the guest as a machine check, causing the guest to crash.
         */
        ld      r11, VCPU_MSR(r9)
        rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
        bne     mc_cont                 /* if so, exit to host */
        andi.   r10, r11, MSR_RI        /* check for unrecoverable exception */
        beq     1f                      /* Deliver a machine check to guest */
        cmpdi   r3, 0           /* Did we handle MCE ? */
        bne     2f              /* Continue guest execution. */
        /* If not, deliver a machine check.  SRR0/1 are already set */
1:      li      r10, BOOK3S_INTERRUPT_MACHINE_CHECK
        bl      kvmppc_msr_interrupt
2:      b       fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *      0 if nothing needs to be done
 *      1 if something happened that needs to be handled by the host
 *      -1 if there was a guest wakeup (IPI or msgsnd)
 *      -2 if we handled a PCI passthrough interrupt (returned by
 *              kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
        rlwinm  r6, r6, 45-31, 0xf      /* extract wake reason field (P8) */
        rlwinm  r6, r6, 45-31, 0xe      /* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
        cmpwi   r6, 8                   /* was it an external interrupt? */
        beq     7f                      /* if so, see what it was */
        cmpwi   r6, 6                   /* was it the decrementer? */
        cmpwi   r6, 5                   /* privileged doorbell? */
        cmpwi   r6, 3                   /* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        cmpwi   r6, 0xa                 /* Hypervisor maintenance ? */
        li      r3, 1                   /* anything else, return 1 */

        /* hypervisor doorbell */
3:      li      r12, BOOK3S_INTERRUPT_H_DOORBELL
        /*
         * Clear the doorbell as we will invoke the handler
         * explicitly in the guest exit path.
         */
        lis     r6, (PPC_DBELL_SERVER << (63-36))@h
        /* see if it's a host IPI */
        lbz     r0, HSTATE_HOST_IPI(r13)
        /* if not, return -1 */

        /* Woken up due to Hypervisor maintenance interrupt */
4:      li      r12, BOOK3S_INTERRUPT_HMI

        /* external interrupt - create a stack frame so we can call C */
        std     r0, PPC_LR_STKOFF(r1)
        stdu    r1, -PPC_MIN_STKFRM(r1)
        li      r12, BOOK3S_INTERRUPT_EXTERNAL
        /*
         * Return code of 2 means PCI passthrough interrupt, but
         * we need to return back to host to complete handling the
         * interrupt.  Trap reason is expected in r12 by the guest
         * exit code.
         */
        li      r12, BOOK3S_INTERRUPT_HV_RM_HARD
        ld      r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
        addi    r1, r1, PPC_MIN_STKFRM

/*
 * Save away FP, VMX and VSX registers.
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
        oris    r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        oris    r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        addi    r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
        addi    r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        mfspr   r6,SPRN_VRSAVE
        stw     r6,VCPU_VRSAVE(r31)

/*
 * Load up FP, VMX and VSX registers
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
        oris    r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        oris    r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        addi    r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
        addi    r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        lwz     r7,VCPU_VRSAVE(r31)
        mtspr   SPRN_VRSAVE,r7

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
        std     r0, PPC_LR_STKOFF(r1)
        rldimi  r8, r0, MSR_TM_LG, 63-MSR_TM_LG
        rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
        beq     1f      /* TM not active in guest. */
        std     r1, HSTATE_HOST_R1(r13)
        li      r3, TM_CAUSE_KVM_RESCHED
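        /*
         * The failure cause in r3 is recorded by treclaim in the TEXASR
         * failure code, so the guest can see why its transaction was
         * aborted when it next runs.
         */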
        /* Clear the MSR RI since r1, r13 are all going to be foobar. */

        /* All GPRs are volatile at this point. */

        /* Temporarily store r13 and r9 so we have some regs to play with */
        std     r9, PACATMSCRATCH(r13)
        ld      r9, HSTATE_KVM_VCPU(r13)

        /* Get a few more GPRs free. */
        std     r29, VCPU_GPRS_TM(29)(r9)
        std     r30, VCPU_GPRS_TM(30)(r9)
        std     r31, VCPU_GPRS_TM(31)(r9)

        /* Save away PPR and DSCR soon so we don't run with user values. */
        mfspr   r30, SPRN_DSCR
        ld      r29, HSTATE_DSCR(r13)
        mtspr   SPRN_DSCR, r29

        /* Save all but r9, r13 & r29-r31 */
        .if (reg != 9) && (reg != 13)
        std     reg, VCPU_GPRS_TM(reg)(r9)
        /* ... now save r13 */
        std     r4, VCPU_GPRS_TM(13)(r9)
        /* ... and save r9 */
        ld      r4, PACATMSCRATCH(r13)
        std     r4, VCPU_GPRS_TM(9)(r9)

        /* Reload stack pointer and TOC. */
        ld      r1, HSTATE_HOST_R1(r13)

        /* Set MSR RI now we have r1 and r13 back. */

        /* Save away checkpointed SPRs. */
        std     r31, VCPU_PPR_TM(r9)
        std     r30, VCPU_DSCR_TM(r9)
        std     r5, VCPU_LR_TM(r9)
        stw     r6, VCPU_CR_TM(r9)
        std     r7, VCPU_CTR_TM(r9)
        std     r8, VCPU_AMR_TM(r9)
        std     r10, VCPU_TAR_TM(r9)

        /* Restore r12 as trap number. */
        lwz     r12, VCPU_TRAP(r9)

        addi    r3, r9, VCPU_FPRS_TM
        addi    r3, r9, VCPU_VRS_TM
        mfspr   r6, SPRN_VRSAVE
        stw     r6, VCPU_VRSAVE_TM(r9)

        /*
         * We need to save these SPRs after the treclaim so that the software
         * error code is recorded correctly in the TEXASR.  Also the user may
         * change these outside of a transaction, so they must always be
         * context switched.
         */
        mfspr   r5, SPRN_TFHAR
        mfspr   r6, SPRN_TFIAR
        mfspr   r7, SPRN_TEXASR
        std     r5, VCPU_TFHAR(r9)
        std     r6, VCPU_TFIAR(r9)
        std     r7, VCPU_TEXASR(r9)

        ld      r0, PPC_LR_STKOFF(r1)

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
        std     r0, PPC_LR_STKOFF(r1)

        /* Turn on TM/FP/VSX/VMX so we can restore them. */
        oris    r5, r5, (MSR_VEC | MSR_VSX)@h

        /*
         * The user may change these outside of a transaction, so they must
         * always be context switched.
         */
        ld      r5, VCPU_TFHAR(r4)
        ld      r6, VCPU_TFIAR(r4)
        ld      r7, VCPU_TEXASR(r4)
        mtspr   SPRN_TFHAR, r5
        mtspr   SPRN_TFIAR, r6
        mtspr   SPRN_TEXASR, r7

        rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
        beqlr                   /* TM not active in guest */
        std     r1, HSTATE_HOST_R1(r13)

        /*
         * Make sure the failure summary is set, otherwise we'll program
         * check when we trechkpt.  It's possible that this might not have
         * been set on a kvmppc_set_one_reg() call but we shouldn't let
         * this crash the host.
         */
        oris    r7, r7, (TEXASR_FS)@h
        mtspr   SPRN_TEXASR, r7

        /*
         * We need to load up the checkpointed state for the guest.
         * We need to do this early as it will blow away any GPRs, VSRs and
         * some SPRs.
         */
        addi    r3, r31, VCPU_FPRS_TM
        addi    r3, r31, VCPU_VRS_TM
        lwz     r7, VCPU_VRSAVE_TM(r4)
        mtspr   SPRN_VRSAVE, r7

        ld      r5, VCPU_LR_TM(r4)
        lwz     r6, VCPU_CR_TM(r4)
        ld      r7, VCPU_CTR_TM(r4)
        ld      r8, VCPU_AMR_TM(r4)
        ld      r9, VCPU_TAR_TM(r4)

        /*
         * Load up PPR and DSCR values but don't put them in the actual SPRs
         * till the last moment to avoid running with userspace PPR and DSCR
         * for too long.
         */
        ld      r29, VCPU_DSCR_TM(r4)
        ld      r30, VCPU_PPR_TM(r4)

        std     r2, PACATMSCRATCH(r13)  /* Save TOC */

        /* Clear the MSR RI since r1, r13 are all going to be foobar. */

        /* Load GPRs r0-r28 */
        ld      reg, VCPU_GPRS_TM(reg)(r31)

        mtspr   SPRN_DSCR, r29

        /* Load final GPRs */
        ld      29, VCPU_GPRS_TM(29)(r31)
        ld      30, VCPU_GPRS_TM(30)(r31)
        ld      31, VCPU_GPRS_TM(31)(r31)

        /* TM checkpointed state is now setup.  All GPRs are now volatile. */

        /* Now let's get back the state we need. */
        ld      r29, HSTATE_DSCR(r13)
        mtspr   SPRN_DSCR, r29
        ld      r4, HSTATE_KVM_VCPU(r13)
        ld      r1, HSTATE_HOST_R1(r13)
        ld      r2, PACATMSCRATCH(r13)

        /* Set the MSR RI since we have our registers back. */
        ld      r0, PPC_LR_STKOFF(r1)

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is
 * taken from VCPU_INTR_MSR and is modified based on the required TM state
 * changes.
 *      r11 has the guest MSR value (in/out)
 *      r9 has a vcpu pointer (in)
 *      r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
        rldicl  r0, r11, 64 - MSR_TS_S_LG, 62
        cmpwi   r0, 2           /* Check if we are in transactional state.. */
        ld      r11, VCPU_INTR_MSR(r9)
        /* ... if transactional, change to suspended */
1:      rldimi  r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
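        /*
         * In C terms: take the new MSR from vcpu->arch.intr_msr, but if
         * the old MSR's TS field was transactional (0b10), enter the
         * guest suspended (TS = 0b01); otherwise keep the old TS value.
         */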
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
        mtspr   SPRN_MMCR2, r3
        lis     r3, (MMCR0_PMXE | MMCR0_FCECE)@h
        ori     r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
        mtspr   SPRN_MMCR0, r3

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r6, VCORE_IN_GUEST(r5)
        beq     5f                      /* if in guest, need to */
        ld      r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
        std     r3, VCPU_CUR_ACTIVITY(r4)
        std     r5, VCPU_ACTIVITY_START(r4)

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
        ld      r5, HSTATE_KVM_VCORE(r13)
        lbz     r8, VCORE_IN_GUEST(r5)
        beq     4f                      /* if in guest, need to */
        ld      r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
4:      ld      r5, VCPU_CUR_ACTIVITY(r4)
        ld      r6, VCPU_ACTIVITY_START(r4)
        std     r3, VCPU_CUR_ACTIVITY(r4)
        std     r7, VCPU_ACTIVITY_START(r4)

        ld      r8, TAS_SEQCOUNT(r5)
        std     r8, TAS_SEQCOUNT(r5)
        ld      r7, TAS_TOTAL(r5)
        std     r7, TAS_TOTAL(r5)
3:      std     r3, TAS_MIN(r5)
        std     r8, TAS_SEQCOUNT(r5)
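        /*
         * The two TAS_SEQCOUNT stores bracket the statistics update,
         * seqlock-style: the count is odd while the totals are being
         * modified, so a reader retries if it sees an odd or changing
         * sequence count.
         */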