GNU Linux-libre 4.19.286-gnu1
[releases.git] / arch / arm64 / include / asm / kvm_asm.h
1 /*
2  * Copyright (C) 2012,2013 - ARM Ltd
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #ifndef __ARM_KVM_ASM_H__
19 #define __ARM_KVM_ASM_H__
20
21 #include <asm/virt.h>
22
/* Bit position of the per-vCPU "workaround 2" flag. */
#define VCPU_WORKAROUND_2_FLAG_SHIFT    0
/*
 * NOTE(review): presumably records the state of ARM errata "workaround 2"
 * (SSBD / Spectre-v4 mitigation) for the vCPU — confirm against the users
 * of this flag.
 */
#define VCPU_WORKAROUND_2_FLAG          (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)

/*
 * Exit codes handed back from hyp: the low bits carry the exception class
 * (ARM_EXCEPTION_*), while bit 31 additionally flags a pending SError.
 */
#define ARM_EXIT_WITH_SERROR_BIT  31
/* Strip the SError flag, leaving just the base exception code. */
#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
/* 1 if the SError flag is set in exit code @x, 0 otherwise. */
#define ARM_SERROR_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ         0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP        2
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE    HVC_STUB_ERR
35
36 #ifndef __ASSEMBLY__
37
38 #include <linux/mm.h>
39
/*
 * Translate a kernel address of @sym into its equivalent linear mapping.
 *
 * When the kernel itself runs in hyp mode (is_kernel_in_hyp_mode()), the
 * kernel-image address of @sym is usable directly; otherwise the linear-map
 * alias is returned — presumably because the hyp mappings are created from
 * the linear map in that configuration (confirm against the hyp mapping
 * setup code).  Evaluates to a void *.
 */
#define kvm_ksym_ref(sym)                                               \
        ({                                                              \
                void *val = &sym;                                       \
                if (!is_kernel_in_hyp_mode())                           \
                        val = lm_alias(&sym);                           \
                val;                                                    \
         })
48
/* Opaque kernel types; only pointers to them are passed through here. */
struct kvm;
struct kvm_vcpu;

/*
 * Start/end markers of the hyp init code — presumably provided by the hyp
 * assembly / linker script (TODO confirm).
 */
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];

/* EL2 exception vector table. */
extern char __kvm_hyp_vector[];

/* TLB maintenance entry points implemented in the hyp code. */
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

/* Program the virtual counter offset (passed as two 32-bit halves). */
extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);

/* Guest entry for VHE kernels (kernel already at EL2). */
extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);

/* Guest entry for non-VHE kernels (reached via hyp call). */
extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);

/* GICv3 hyp accessors: VTR/VMCR reads/writes and LR initialisation. */
extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

/* Read the MDCR_EL2 (debug configuration) value in use at hyp. */
extern u32 __kvm_get_mdcr_el2(void);

/* Probe/initialise stage-2 translation; returns a status/parameter value. */
extern u32 __init_stage2_translation(void);
76
/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
/*
 * Address of this CPU's copy of per-CPU variable @sym while running at HYP:
 * the hyp-usable symbol address plus the per-CPU offset kept in TPIDR_EL2
 * (set up by the hyp init code — TODO confirm).  Evaluates to typeof(&sym).
 */
#define __hyp_this_cpu_ptr(sym)                                         \
        ({                                                              \
                void *__ptr = hyp_symbol_addr(sym);                     \
                __ptr += read_sysreg(tpidr_el2);                        \
                (typeof(&sym))__ptr;                                    \
         })
84
/* Read this CPU's copy of per-CPU variable @sym while running at HYP. */
#define __hyp_this_cpu_read(sym)                                        \
        ({                                                              \
                *__hyp_this_cpu_ptr(sym);                               \
         })
89
/*
 * Emit a __kvm_ex_table entry from inline asm: a pair of section-relative
 * 32-bit offsets (faulting location, fixup location), 8-byte aligned.
 * Same entry layout as the assembly-side _kvm_extable macro in this file.
 */
#define __KVM_EXTABLE(from, to)                                         \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"
95
96
/*
 * Issue an address-translation instruction ("at <at_op>, <addr>") at EL2,
 * protected against the AT itself taking an unexpected exception.
 *
 * SPSR_EL2 and ELR_EL2 are snapshotted before the AT.  If the AT faults,
 * the __kvm_ex_table fixup (label 1 -> label 2) restores both registers
 * (the exception will have overwritten them) and the macro evaluates to
 * -EFAULT; on success it evaluates to 0.  The ISB orders the translation
 * before any subsequent read of its result — the result itself is not
 * read here (callers presumably read PAR_EL1 afterwards — confirm).
 */
#define __kvm_at(at_op, addr)                                           \
( {                                                                     \
        int __kvm_at_err = 0;                                           \
        u64 spsr, elr;                                                  \
        asm volatile(                                                   \
        "       mrs     %1, spsr_el2\n"                                 \
        "       mrs     %2, elr_el2\n"                                  \
        "1:     at      "at_op", %3\n"                                  \
        "       isb\n"                                                  \
        "       b       9f\n"                                           \
        "2:     msr     spsr_el2, %1\n"                                 \
        "       msr     elr_el2, %2\n"                                  \
        "       mov     %w0, %4\n"                                      \
        "9:\n"                                                          \
        __KVM_EXTABLE(1b, 2b)                                           \
        : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
        : "r" (addr), "i" (-EFAULT));                                   \
        __kvm_at_err;                                                   \
} )
116
117
118 #else /* __ASSEMBLY__ */
119
/*
 * \reg = address of this CPU's copy of per-CPU symbol \sym while at HYP:
 * PC-relative address of \sym plus the per-CPU offset in TPIDR_EL2.
 * Clobbers \tmp.
 */
.macro hyp_adr_this_cpu reg, sym, tmp
        adr_l   \reg, \sym
        mrs     \tmp, tpidr_el2
        add     \reg, \reg, \tmp
.endm
125
/*
 * \reg = 64-bit value of this CPU's copy of per-CPU symbol \sym while at
 * HYP, loaded from \sym + TPIDR_EL2.  Clobbers \tmp.
 */
.macro hyp_ldr_this_cpu reg, sym, tmp
        adr_l   \reg, \sym
        mrs     \tmp, tpidr_el2
        ldr     \reg,  [\reg, \tmp]
.endm
131
/*
 * \reg = this CPU's host CPU context (the per-CPU kvm_host_cpu_state).
 * Clobbers \tmp.
 */
.macro get_host_ctxt reg, tmp
        hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
.endm
135
/*
 * \vcpu = pointer to the running vcpu, read from this CPU's host context
 * (HOST_CONTEXT_VCPU slot) and converted to a hyp VA.  On return, \ctxt
 * holds the host context address as a side effect.
 */
.macro get_vcpu_ptr vcpu, ctxt
        get_host_ctxt \ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
        kern_hyp_va     \vcpu
.endm
141
/*
 * KVM extable for unexpected exceptions.
 * In the same format as _asm_extable, but output to a different section so
 * that it can be mapped to EL2. The KVM version is not sorted. The caller
 * must ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro  _kvm_extable, from, to
        .pushsection    __kvm_ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
.endm
156
157 #endif
158
159 #endif /* __ARM_KVM_ASM_H__ */