/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>
extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;
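/*
 * __kvm_ex_table holds (insn, fixup) pairs, stored as PC-relative
 * offsets, for EL2 code that may fault (in this tree they are emitted
 * by assembly helpers such as the one backing __kvm_at()).
 * kvm_unexpected_el2_exception() at the bottom of this file walks the
 * table to recover from such faults.
 */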
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * When the system doesn't support FP/SIMD, we cannot rely on
	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
	 * abort on the very first access to FP and thus we should never
	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
	 * trap the accesses.
	 */
	if (!system_supports_fpsimd() ||
	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}
/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}
static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}
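/*
 * In __activate_traps_fpsimd32() above, bit 30 of FPEXC32_EL2 is
 * FPEXC.EN, so "1 << 30" enables the AArch32 FP unit and nothing else;
 * the isb() makes the write take effect before CPTR_EL2.TFP (or
 * CPACR_EL1.FPEN) is subsequently reprogrammed.
 */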
static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}
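/*
 * On VHE, guest FP/SVE trapping is programmed through CPACR_EL1
 * (FPEN/ZEN: clearing the field enables the trap); the non-VHE variant
 * further down uses CPTR_EL2 instead, where TFP/TZ have the opposite
 * polarity (setting the bit enables the trap).
 */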
static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_ZEN;
	if (!update_fp_enabled(vcpu)) {
		val &= ~CPACR_EL1_FPEN;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(activate_traps_vhe);
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
}
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}
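/*
 * When returning to the host below, the vectors may be per-CPU: with
 * KPTI disabled the host can be running on a CPU-specific vector page
 * (published in this_cpu_vector, e.g. for the Spectre-BHB mitigation
 * in this tree); with KPTI enabled the trampoline "vectors" are used
 * instead.
 */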
static void deactivate_traps_vhe(void)
{
	const char *host_vectors = vectors;

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);

	if (!arm64_kernel_unmapped_at_el0())
		host_vectors = __this_cpu_read(this_cpu_vector);
	write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(deactivate_traps_vhe);
static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}
void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}
static void __hyp_text __activate_vm(struct kvm *kvm)
{
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}
/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}
/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);
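/*
 * hyp_alternate_select() (from asm/kvm_hyp.h) defines a tiny function
 * that returns a pointer to one of the two callbacks above, selected
 * at boot by the alternatives framework depending on whether the CPU
 * erratum capability is set. Roughly (a simplified sketch, not the
 * exact macro):
 *
 *	static bool (*__check_arm_834220(void))(void)
 *	{
 *		bool (*val)(void) = __false_value;
 *		asm volatile(ALTERNATIVE("nop", "mov %0, %1",
 *					 ARM64_WORKAROUND_834220)
 *			     : "+r" (val) : "r" (__true_value));
 *		return val;
 *	}
 *
 * which is why callers use the double-call form __check_arm_834220()().
 */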
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg(par_el1);
	else
		tmp = 1; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to the guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}
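/*
 * The conversion above relies on the PAR_EL1 and HPFAR_EL2 layouts:
 * on a successful walk, PAR_EL1.F (bit 0) is 0 and PAR_EL1[47:12]
 * holds the output address, while HPFAR_EL2 wants PA[47:12] placed in
 * FIPA, i.e. at bits [43:4]. Hence, as a worked example:
 *
 *	hpfar = ((par >> 12) & ((1UL << 36) - 1)) << 4;
 *
 * extracts the 36 PA bits and re-positions them for HPFAR_EL2.
 */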
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}
/* Skip an instruction which has been emulated. Returns true if
 * execution can continue or false if we need to exit hyp mode because
 * single-step was in effect.
 */
static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		vcpu->arch.fault.esr_el2 =
			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
		return false;
	} else {
		return true;
	}
}
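/*
 * Note on the synthesized ESR above: the EC field is set to "software
 * step from a lower EL" and the low bits hold the ISS, where 0x22
 * (0b100010) is the fault status code for a debug exception; this way
 * handle_exit sees a well-formed single-step syndrome even though the
 * stepped instruction was consumed by the emulation here.
 */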
static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
{
	struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;

	if (has_vhe())
		write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN,
			     cpacr_el1);
	else
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);

	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (system_supports_sve() &&
		    (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) {
			struct thread_struct *thread = container_of(
				host_fpsimd,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr);
		} else {
			__fpsimd_save_state(host_fpsimd);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}
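/*
 * The function above implements the lazy FP/SIMD switch: guest FP
 * accesses trap until the first one actually happens, at which point
 * the host state is saved (at most once per run loop entry, tracked by
 * KVM_ARM64_FP_HOST), the guest state is restored, and the trap is
 * disabled so that all further guest FP accesses run at full speed.
 */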
/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;
	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
	if (system_supports_fpsimd() &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD)
		return __hyp_switch_fpsimd(vcpu);

	if (!__populate_fault_info(vcpu))
		goto exit;
	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1 && __skip_instr(vcpu))
				return true;

			if (ret == -1) {
				/* Promote an illegal access to an
				 * SError. If we would be returning
				 * due to single-step clear the SS
				 * bit so handle_exit knows what to
				 * do after dealing with the error.
				 */
				if (!__skip_instr(vcpu))
					*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			goto exit;
		}
	}
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1 && __skip_instr(vcpu))
			return true;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;
}
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}
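/*
 * For the two helpers below: per the SMCCC spec, the second argument
 * of ARM_SMCCC_ARCH_WORKAROUND_2 selects the firmware SSBD mitigation
 * state (1 = enabled, 0 = disabled), which is why guest entry passes 0
 * and host re-entry passes 1 when the guest has opted out.
 */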
static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}
static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}
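/*
 * In the run loops below, exits that fixup_guest_exit() can handle
 * entirely at EL2 (the lazy FP/SIMD switch, vGIC cpuif emulation)
 * re-enter the guest directly from the do/while loop, skipping the
 * cost of a full host state restore and a trip through the run loop.
 */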
/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	__activate_traps(vcpu);
	__activate_vm(vcpu->kvm);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
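/*
 * Unlike the VHE path above, the non-VHE variant below runs at EL2
 * with its own VA mapping, so every host pointer must be translated
 * with kern_hyp_va() before use, and the full EL1 sysreg context is
 * saved and restored on every entry rather than being split across
 * vcpu_load()/vcpu_put() as on VHE.
 */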
/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state_nvhe(host_ctxt);
	__debug_save_host_buffers_nvhe(vcpu);

	__activate_traps(vcpu);
	__activate_vm(kern_hyp_va(vcpu->kvm));

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	return exit_code;
}
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}
static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;

	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}
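/*
 * The exception table entries walked below store 32-bit offsets that
 * are relative to the entry itself (the usual arm64 relative-extable
 * encoding), so "(unsigned long)&entry->insn + entry->insn" rebuilds
 * the absolute address of the faulting instruction, and likewise for
 * the fixup handler.
 */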
asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
{
	unsigned long addr, fixup;
	struct kvm_cpu_context *host_ctxt;
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = hyp_symbol_addr(__start___kvm_ex_table);
	end = hyp_symbol_addr(__stop___kvm_ex_table);
	host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	hyp_panic(host_ctxt);
}