/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/extable.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>
#include <asm/vectors.h>

extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;

static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

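/*
 * hyp_alternate_select() yields a function that returns one of the two
 * callbacks, patched at boot via the alternatives framework: the VHE
 * variant when ARM64_HAS_VIRT_HOST_EXTN is present, the non-VHE one
 * otherwise. Hence the double call __fpsimd_is_enabled()() below.
 */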
bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}

static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

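/*
 * Configure the EL2 traps for this vcpu: FP/SIMD, trace, PMU and
 * AArch32 cp15 c15 accesses all get trapped while the guest runs.
 */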
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	val = vcpu->arch.hcr_el2;
	if (!(val & HCR_RW) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
	write_sysreg(val, hcr_el2);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}

static void __hyp_text __deactivate_traps_vhe(void)
{
	const char *host_vectors = vectors;
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);

	if (!arm64_kernel_unmapped_at_el0())
		host_vectors = __this_cpu_read(this_cpu_vector);
	write_sysreg(host_vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

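/*
 * Install the guest's stage-2 translation: loading VTTBR_EL2 (VMID and
 * page table base) makes guest memory accesses go through the VM's
 * IPA->PA mapping.
 */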
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}

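/*
 * Trivial constant-returning helpers, so that hyp_alternate_select()
 * can turn the ARM64_WORKAROUND_834220 capability into a predicate
 * callable from hyp context.
 */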
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg(par_el1);
	else
		tmp = 1; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}

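/*
 * Capture the fault syndrome and addresses into vcpu->arch.fault.
 * Returns false if the IPA could not be resolved, in which case the
 * caller re-enters the guest to replay the faulting instruction.
 */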
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

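/*
 * Advance the guest PC past a trapped instruction that has just been
 * emulated, taking care of the AArch32 IT state and instruction width.
 */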
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

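/*
 * True when this vcpu asked for the Spectre-v4 mitigation (SSBD,
 * a.k.a. ARCH_WORKAROUND_2) to be turned off while it runs.
 */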
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

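/*
 * Switch from the host to the guest context and run the guest until
 * it exits. This is the EL2 world switch proper: save host state,
 * install the guest's traps, VM and sysregs, enter, then undo it all
 * in reverse order. Returns the exit code for the host to act on.
 */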
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	__set_guest_arch_workaround_state(vcpu);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				__skip_instr(vcpu);
				goto again;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1) {
			__skip_instr(vcpu);
			goto again;
		}

		/* 0 falls through to be handled out of EL2 */
	}

	__set_host_arch_workaround_state(vcpu);

	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_vcpu *vcpu)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
					    struct kvm_vcpu *vcpu)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

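/*
 * Unrecoverable fault taken at EL2: restore enough host context to be
 * able to produce a meaningful panic message.
 */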
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu = NULL;

	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		vcpu = host_ctxt->__hyp_running_vcpu;
		__timer_save_state(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par, vcpu);

	unreachable();
}

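/*
 * An unexpected exception was taken at EL2. Each entry in the KVM
 * exception table stores its fields as offsets relative to their own
 * addresses; if ELR_EL2 matches a covered instruction, resume at the
 * associated fixup handler, otherwise fall through to hyp_panic().
 */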
asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
{
	unsigned long addr, fixup;
	struct kvm_cpu_context *host_ctxt;
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = hyp_symbol_addr(__start___kvm_ex_table);
	end = hyp_symbol_addr(__stop___kvm_ex_table);
	host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	hyp_panic(host_ctxt);
}