#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;

	/* 0f 01 f9 is the RDTSCP opcode; TSC_AUX is returned in ECX. */
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
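
/*
 * Illustrative expansion (not part of the original header), assuming a
 * read routine shaped like native_read_msr() below:
 *
 * CONFIG_X86_64:
 *	unsigned long low, high;
 *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *	return (low) | (high) << 32;
 *
 * i386:
 *	unsigned long long val;
 *	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
 *	return (val);
 */
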
static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						       int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	return EAX_EDX_VAL(val, low, high);
}

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;

	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
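
/*
 * Usage sketch (illustrative, not part of the original header): a rough
 * cycle count around a code section, where do_work() is a hypothetical
 * function. Because the CPU may execute RDTSC speculatively, use
 * rdtsc_ordered() below when the read must not move relative to other
 * accesses.
 *
 *	u64 start = rdtsc();
 *	do_work();
 *	u64 cycles = rdtsc() - start;
 */
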
/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter. It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access. The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads. An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 */
	barrier_nospec();	/* assumed barrier; the exact primitive varies by kernel version */
	return rdtsc();
}
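
/*
 * Usage sketch (illustrative, not part of the original header): pairing a
 * timestamp with a shared-memory read so the TSC value cannot be taken
 * before the load; shared_seq is a hypothetical variable.
 *
 *	u32 seq = READ_ONCE(shared_seq);
 *	u64 t = rdtsc_ordered();	// ordered like a load, after seq
 */
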
/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)val, (u32)(val >> 32));
}
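
/*
 * Usage sketch (illustrative, not part of the original header): the split
 * and full-width accessors. MSR_IA32_TSC_ADJUST is only an example
 * register, and lo/hi/val are hypothetical locals.
 *
 *	u32 lo, hi;
 *	u64 val;
 *
 *	rdmsr(MSR_IA32_TSC_ADJUST, lo, hi);
 *	rdmsrl(MSR_IA32_TSC_ADJUST, val);
 *	wrmsrl(MSR_IA32_TSC_ADJUST, val + 1);
 */
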
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
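
/*
 * Usage sketch (illustrative, not part of the original header): the _safe
 * variants report a failed access (typically -EIO, via the fixup above)
 * instead of letting the fault be fatal, so probing code can back off.
 * Here msr is a hypothetical u32 and val a hypothetical u64.
 *
 *	u64 val;
 *	int err = rdmsrl_safe(msr, &val);
 *
 *	if (err)
 *		return err;
 */
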
#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
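
/*
 * Usage sketch (illustrative, not part of the original header): reading a
 * performance counter either as a low/high pair or as one 64-bit value.
 * Counter 0 is only an example; which counters exist depends on the PMU.
 *
 *	u32 lo, hi;
 *	u64 count;
 *
 *	rdpmc(0, lo, hi);
 *	rdpmcl(0, count);
 */
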
#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high)  wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
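
/*
 * Usage sketch (illustrative, not part of the original header): the
 * *_on_cpu() helpers perform the access on the named CPU rather than the
 * calling one. cpu and val are hypothetical locals; MSR_IA32_TSC_ADJUST
 * is only an example register.
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe_on_cpu(cpu, MSR_IA32_TSC_ADJUST, &val))
 *		pr_warn("MSR read failed on CPU %u\n", cpu);
 */
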
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif	/* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */