/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"
#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}
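/*
 * Worked example for the check above: 0x73 is binary 0111 0011, i.e. bits
 * 0, 1, 4, 5 and 6, matching the architectural memory types UC, WC, WT,
 * WP and WB; the encodings 2, 3 and 7 are reserved and therefore rejected.
 */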
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		return kvm_pat_valid(data);
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;

	if (data & mask) {
		kvm_inject_gp(vcpu, 0);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
	 * memory type is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs and it is obviously undesirable to run the
	 * guest entirely with UC memory, so we use WB instead.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}
/*
 * Three terms are used in the following code:
 * - segment: an address region covered by fixed MTRRs of the same
 *   granularity (64K, 16K or 4K).
 * - unit: one MSR entry within a segment.
 * - range: the smallest region covered by a single memory cache type,
 *   i.e. one 8-bit field of a unit.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};
static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{ .start = 0x0, .end = 0x80000, .range_shift = 16, .range_start = 0 },

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{ .start = 0x80000, .end = 0xc0000, .range_shift = 14, .range_start = 8 },

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{ .start = 0xc0000, .end = 0x100000, .range_shift = 12, .range_start = 24 },
};
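/*
 * Layout implied by the table above: fixed_ranges[0..7] are the eight 64K
 * ranges of MSR_MTRRfix64K_00000, fixed_ranges[8..23] the sixteen 16K
 * ranges of the two 16K MSRs, and fixed_ranges[24..87] the sixty-four 4K
 * ranges of the eight 4K MSRs, 88 ranges in total.
 */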
/*
 * A unit is the region covered by one MSR.  One MSR entry contains
 * 8 ranges, so the unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}
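/*
 * With the table above this evaluates to 8 << 16 = 512K for the 64K
 * segment, 8 << 14 = 128K per 16K MSR and 8 << 12 = 32K per 4K MSR.
 */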
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix16K_80000,
			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix4K_C0000,
			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
		break;
	default:
		return false;
	}

	return true;
}
static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}
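/*
 * Example: seg 2, unit 3 (MSR_MTRRfix4K_D8000) gives
 * *start = 0xc0000 + 3 * 0x8000 = 0xd8000 and *end = 0xe0000.
 */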
static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}
static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}
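/*
 * Example: for seg 1, n = (0xc0000 - 0x80000) >> 14 = 16, so the last
 * 16K range index is 8 + 16 - 1 = 23.
 */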
static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}
static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}
static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}
static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}
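/*
 * Example: addr 0xd9000 lies in seg 2, so the index is
 * 24 + ((0xd9000 - 0xc0000) >> 12) = 24 + 25 = 49.
 */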
static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}
static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/* This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}
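/*
 * Worked example, assuming a 36-bit guest physical address width: a 1G
 * region at 2G has base = 0x80000000 and an extended mask of
 * 0xffffffffc0000000, so ~mask = 0x3fffffff and
 * *end = (0x80000000 | 0x3fffffff) + 1 = 0xc0000000.
 */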
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	u64 start, end;
	int index;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	    !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}
static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	return (range->mask & (1 << 11)) != 0;
}
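/*
 * Bit 11 of a variable-range PHYSMASK MSR is the V (valid) flag defined
 * in the Intel SDM; a range only participates in lookups while it is set.
 */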
static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
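	/*
	 * Example of the decoding above: MSR 0x202 (MTRRphysBase1) gives
	 * index = 1, is_mtrr_mask = 0; MSR 0x203 (MTRRphysMask1) gives
	 * index = 1, is_mtrr_mask = 1.
	 */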
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);

	/*
	 * Extend the mask with all 1 bits to the left, since those
	 * bits must implicitly be 0.  The bits are then cleared
	 * when reading them.
	 */
	if (!is_mtrr_mask)
		cur->base = data;
	else
		cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/* SMRR = 0, WC = 1, FIX = 1, VCNT = KVM_NR_VAR_MTRR */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

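	/*
	 * Worked example for the MTRRcap value above: bits 10 (WC) and
	 * 8 (FIX) give 0x500, and with KVM_NR_VAR_MTRR == 8 in the low
	 * VCNT byte the guest reads back 0x508.
	 */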
	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
	}

	return 0;
}
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}
struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* mtrr is completely disabled? */
	bool mtrr_disabled;
	/* [start, end) is not fully covered in MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};
		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* max address that has been covered in var MTRRs. */
			u64 start_max;
		};
	};

	bool fixed;
};
static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}
static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * This function is called while walking kvm_mtrr.head;
		 * the matched range has the minimum base address which
		 * overlaps [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the max address that has been covered. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}
static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}
static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = NULL;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}
static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* have looked up for all fixed MTRRs. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}
static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}
static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}
static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}
static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}
static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}
#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_)	\
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_);	\
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
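/*
 * The iterator above visits, in order, every fixed and then every variable
 * MTRR range that overlaps [_gpa_start_, _gpa_end_), exposing the guest
 * memory type of each matched range in _iter_->mem_type; if MTRRs are
 * disabled entirely, nothing is visited and _iter_->mtrr_disabled is set.
 */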
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;
		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		    ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * We only checked one page; it is impossible for it to be
	 * partially covered by MTRRs.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}