/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_SERROR_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ         0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP        2
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE    HVC_STUB_ERR
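
/*
 * Illustrative sketch (not part of this header): an exit code combines one
 * of the ARM_EXCEPTION_* values with, possibly, the SError bit on top:
 *
 *	u32 exit_code = ARM_EXCEPTION_TRAP | (1U << ARM_EXIT_WITH_SERROR_BIT);
 *
 *	ARM_SERROR_PENDING(exit_code);	// 1: an SError is also pending
 *	ARM_EXCEPTION_CODE(exit_code);	// ARM_EXCEPTION_TRAP
 */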

#define KVM_ARM64_DEBUG_DIRTY_SHIFT     0
#define KVM_ARM64_DEBUG_DIRTY           (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)

#define VCPU_WORKAROUND_2_FLAG_SHIFT    0
#define VCPU_WORKAROUND_2_FLAG          (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)

/* Translate a kernel address of @sym into its equivalent linear mapping */
#define kvm_ksym_ref(sym)                                               \
        ({                                                              \
                void *val = &sym;                                       \
                if (!is_kernel_in_hyp_mode())                           \
                        val = phys_to_virt((u64)&sym - kimage_voffset); \
                val;                                                    \
         })
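
/*
 * Usage sketch (illustrative, not part of this header): KVM's non-VHE init
 * path refers to HYP symbols through kvm_ksym_ref() so the address handed
 * to EL2 is in the linear map rather than in the kernel image, e.g.:
 *
 *	vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 */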

#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;

extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];

extern char __kvm_hyp_vector[];

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern u32 __init_stage2_translation(void);

/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
#define __hyp_this_cpu_ptr(sym)                                         \
        ({                                                              \
                void *__ptr = hyp_symbol_addr(sym);                     \
                __ptr += read_sysreg(tpidr_el2);                        \
                (typeof(&sym))__ptr;                                    \
         })

#define __hyp_this_cpu_read(sym)                                        \
        ({                                                              \
                *__hyp_this_cpu_ptr(sym);                               \
         })
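
/*
 * Usage sketch (illustrative, not part of this header): HYP code locates its
 * per-CPU data by adding the TPIDR_EL2 offset to the symbol address, e.g.:
 *
 *	struct kvm_cpu_context *host_ctxt;
 *
 *	host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);
 */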

#define __KVM_EXTABLE(from, to)                                         \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"

#define __kvm_at(at_op, addr)                                           \
( {                                                                     \
        int __kvm_at_err = 0;                                           \
        u64 spsr, elr;                                                  \
        asm volatile(                                                   \
        "       mrs     %1, spsr_el2\n"                                 \
        "       mrs     %2, elr_el2\n"                                  \
        "1:     at      "at_op", %3\n"                                  \
        "       isb\n"                                                  \
        "       b       9f\n"                                           \
        "2:     msr     spsr_el2, %1\n"                                 \
        "       msr     elr_el2, %2\n"                                  \
        "       mov     %w0, %4\n"                                      \
        "9:\n"                                                          \
        __KVM_EXTABLE(1b, 2b)                                           \
        : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
        : "r" (addr), "i" (-EFAULT));                                   \
        __kvm_at_err;                                                   \
} )
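
/*
 * Usage sketch (illustrative, not part of this header): an address
 * translation that must not be fatal if it takes an unexpected exception
 * at EL2 can be issued as:
 *
 *	if (!__kvm_at("s1e1r", far))
 *		par = read_sysreg(par_el1);
 *
 * On a fault the extable fixup restores SPSR_EL2/ELR_EL2 and the macro
 * evaluates to -EFAULT instead of 0.
 */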

#else /* __ASSEMBLY__ */

.macro hyp_adr_this_cpu reg, sym, tmp
        adr_l   \reg, \sym
        mrs     \tmp, tpidr_el2
        add     \reg, \reg, \tmp
.endm

.macro hyp_ldr_this_cpu reg, sym, tmp
        adr_l   \reg, \sym
        mrs     \tmp, tpidr_el2
        ldr     \reg, [\reg, \tmp]
.endm

.macro get_host_ctxt reg, tmp
        hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
.endm

.macro get_vcpu_ptr vcpu, ctxt
        get_host_ctxt \ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
        kern_hyp_va     \vcpu
.endm
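
/*
 * Usage sketch (illustrative, not part of this header): HYP entry code can
 * recover the current vCPU pointer with one scratch register, e.g.:
 *
 *	get_vcpu_ptr	x1, x0		// x1 = vcpu, x0 = clobbered scratch
 */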

/*
 * KVM extable for unexpected exceptions.
 * In the same format as _asm_extable, but output to a different section so
 * that it can be mapped to EL2. The KVM version is not sorted. The caller
 * must ensure that x18 has the hypervisor value to allow any Shadow-Call-Stack
 * instrumented code to write to it, and that SPSR_EL2 and ELR_EL2 are
 * restored by the fixup.
 */
.macro  _kvm_extable, from, to
        .pushsection    __kvm_ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
.endm
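
/*
 * Usage sketch (illustrative, not part of this header; labels are made up):
 * pair a potentially faulting instruction with its fixup, mirroring
 * __KVM_EXTABLE above:
 *
 *	1:	eret
 *		_kvm_extable	1b, 2f
 *	2:	// fixup: restore SPSR_EL2/ELR_EL2, then recover
 */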

#endif /* __ASSEMBLY__ */

#endif /* __ARM_KVM_ASM_H__ */