GNU Linux-libre 4.9.309-gnu1
[releases.git] / arch / x86 / kvm / cpuid.h
1 #ifndef ARCH_X86_KVM_CPUID_H
2 #define ARCH_X86_KVM_CPUID_H
3
4 #include "x86.h"
5 #include <asm/cpu.h>
6
/* Recompute per-vcpu state derived from its CPUID entries (implemented in cpuid.c). */
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
/* Host-side MPX capability query — NOTE(review): semantics defined in cpuid.c. */
bool kvm_mpx_supported(void);
/* Look up the guest CPUID entry for (function, index); NULL if userspace set none. */
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);
/* KVM_GET_SUPPORTED_CPUID / KVM_GET_EMULATED_CPUID ioctl backend (type selects which). */
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type);
/* Legacy KVM_SET_CPUID: install guest CPUID from the old kvm_cpuid_entry layout. */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries);
/* KVM_SET_CPUID2: install guest CPUID entries. */
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
/* KVM_GET_CPUID2: copy the vcpu's current CPUID entries back to userspace. */
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
/* Emulate the CPUID instruction: *eax/*ecx select the leaf, results land in all four. */
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

/* Derive the guest's MAXPHYADDR from its CPUID entries (implemented in cpuid.c). */
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
26
/* Return the cached guest MAXPHYADDR (set by kvm_update_cpuid). */
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.maxphyaddr;
}
31
32 static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
33 {
34         struct kvm_cpuid_entry2 *best;
35
36         if (!static_cpu_has(X86_FEATURE_XSAVE))
37                 return false;
38
39         best = kvm_find_cpuid_entry(vcpu, 1, 0);
40         return best && (best->ecx & bit(X86_FEATURE_XSAVE));
41 }
42
43 static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
44 {
45         struct kvm_cpuid_entry2 *best;
46
47         best = kvm_find_cpuid_entry(vcpu, 1, 0);
48         return best && (best->edx & bit(X86_FEATURE_MTRR));
49 }
50
51 static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
52 {
53         struct kvm_cpuid_entry2 *best;
54
55         best = kvm_find_cpuid_entry(vcpu, 7, 0);
56         return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
57 }
58
59 static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
60 {
61         struct kvm_cpuid_entry2 *best;
62
63         best = kvm_find_cpuid_entry(vcpu, 7, 0);
64         return best && (best->ebx & bit(X86_FEATURE_SMEP));
65 }
66
67 static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
68 {
69         struct kvm_cpuid_entry2 *best;
70
71         best = kvm_find_cpuid_entry(vcpu, 7, 0);
72         return best && (best->ebx & bit(X86_FEATURE_SMAP));
73 }
74
75 static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
76 {
77         struct kvm_cpuid_entry2 *best;
78
79         best = kvm_find_cpuid_entry(vcpu, 7, 0);
80         return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
81 }
82
83 static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
84 {
85         struct kvm_cpuid_entry2 *best;
86
87         best = kvm_find_cpuid_entry(vcpu, 7, 0);
88         return best && (best->ecx & bit(X86_FEATURE_PKU));
89 }
90
91 static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
92 {
93         struct kvm_cpuid_entry2 *best;
94
95         best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
96         return best && (best->edx & bit(X86_FEATURE_LM));
97 }
98
99 static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
100 {
101         struct kvm_cpuid_entry2 *best;
102
103         best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
104         return best && (best->ecx & bit(X86_FEATURE_OSVW));
105 }
106
107 static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
108 {
109         struct kvm_cpuid_entry2 *best;
110
111         best = kvm_find_cpuid_entry(vcpu, 1, 0);
112         return best && (best->ecx & bit(X86_FEATURE_PCID));
113 }
114
115 static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
116 {
117         struct kvm_cpuid_entry2 *best;
118
119         best = kvm_find_cpuid_entry(vcpu, 1, 0);
120         return best && (best->ecx & bit(X86_FEATURE_X2APIC));
121 }
122
123 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
124 {
125         struct kvm_cpuid_entry2 *best;
126
127         best = kvm_find_cpuid_entry(vcpu, 0, 0);
128         return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
129 }
130
131 static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
132 {
133         struct kvm_cpuid_entry2 *best;
134
135         best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
136         return best && (best->edx & bit(X86_FEATURE_GBPAGES));
137 }
138
139 static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
140 {
141         struct kvm_cpuid_entry2 *best;
142
143         best = kvm_find_cpuid_entry(vcpu, 7, 0);
144         return best && (best->ebx & bit(X86_FEATURE_RTM));
145 }
146
147 static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
148 {
149         struct kvm_cpuid_entry2 *best;
150
151         best = kvm_find_cpuid_entry(vcpu, 7, 0);
152         return best && (best->ebx & bit(X86_FEATURE_MPX));
153 }
154
155 static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
156 {
157         struct kvm_cpuid_entry2 *best;
158
159         best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
160         return best && (best->edx & bit(X86_FEATURE_RDTSCP));
161 }
162
163 static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
164 {
165         struct kvm_cpuid_entry2 *best;
166
167         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
168         if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
169                 return true;
170         best = kvm_find_cpuid_entry(vcpu, 7, 0);
171         return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
172 }
173
174 static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
175 {
176         struct kvm_cpuid_entry2 *best;
177
178         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
179         if (best && (best->ebx & (bit(X86_FEATURE_AMD_IBRS | bit(X86_FEATURE_AMD_SSBD)))))
180                 return true;
181         best = kvm_find_cpuid_entry(vcpu, 7, 0);
182         return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SPEC_CTRL_SSBD)));
183 }
184
185 static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
186 {
187         struct kvm_cpuid_entry2 *best;
188
189         best = kvm_find_cpuid_entry(vcpu, 7, 0);
190         return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
191 }
192
193 static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
194 {
195         struct kvm_cpuid_entry2 *best;
196
197         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
198         return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
199 }
200
201
202
203 /*
204  * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
205  */
206 #define BIT_NRIPS       3
207
208 static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
209 {
210         struct kvm_cpuid_entry2 *best;
211
212         best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
213
214         /*
215          * NRIPS is a scattered cpuid feature, so we can't use
216          * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
217          * position 8, not 3).
218          */
219         return best && (best->edx & bit(BIT_NRIPS));
220 }
221 #undef BIT_NRIPS
222
223 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
224 {
225         struct kvm_cpuid_entry2 *best;
226
227         best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
228         if (!best)
229                 return -1;
230
231         return x86_family(best->eax);
232 }
233
234 static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
235 {
236         struct kvm_cpuid_entry2 *best;
237
238         best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
239         if (!best)
240                 return -1;
241
242         return x86_model(best->eax);
243 }
244
245 static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
246 {
247         struct kvm_cpuid_entry2 *best;
248
249         best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
250         if (!best)
251                 return -1;
252
253         return x86_stepping(best->eax);
254 }
255
256 #endif