/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

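/*
 * MSR_MTRRdefType layout (Intel SDM vol. 3, "MTRRdefType Register"):
 * bit 11 (E) enables the MTRRs, bit 10 (FE) enables the fixed-range MTRRs,
 * and bits 7:0 hold the default memory type for addresses not covered by
 * any range.
 */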
#define IA32_MTRR_DEF_TYPE_E            (1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE           (1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK    (0xff)

static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}

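/*
 * Memory type encodings (Intel SDM vol. 3): 0 = UC, 1 = WC, 4 = WT, 5 = WP,
 * 6 = WB.  PAT entries may additionally use 7 = UC-; values 2, 3 and
 * anything above 7 are reserved.
 */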
static bool valid_pat_type(unsigned t)
{
        return t < 8 && (1 << t) & 0xf3;        /* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
        return t < 8 && (1 << t) & 0x73;        /* 0, 1, 4, 5, 6 */
}

bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        int i;
        u64 mask;

        if (!msr_mtrr_valid(msr))
                return false;

        if (msr == MSR_IA32_CR_PAT) {
                for (i = 0; i < 8; i++)
                        if (!valid_pat_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        } else if (msr == MSR_MTRRdefType) {
                if (data & ~0xcff)
                        return false;
                return valid_mtrr_type(data & 0xff);
        } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
                for (i = 0; i < 8; i++)
                        if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        }

        /* variable MTRRs */
        WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

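        /*
         * Bits above the guest MAXPHYADDR are reserved in both MSRs of a
         * variable MTRR pair.  In MTRRphysBase, bits 11:8 are reserved as
         * well (the memory type lives in bits 7:0); in MTRRphysMask, bits
         * 10:0 are reserved and bit 11 is the valid bit, hence the 0xf00
         * and 0x7ff masks below.
         */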
        mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
        if ((msr & 1) == 0) {
                /* MTRR base */
                if (!valid_mtrr_type(data & 0xff))
                        return false;
                mask |= 0xf00;
        } else
                /* MTRR mask */
                mask |= 0x7ff;
        if (data & mask) {
                kvm_inject_gp(vcpu, 0);
                return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);

static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
        return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
        return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
        return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

/*
 * Three terms are used in the following code:
 * - segment: one of the address segments covered by the fixed MTRRs.
 * - unit: one MSR entry within a segment.
 * - range: the smallest region covered by a single memory cache type,
 *   i.e. one byte of a fixed MTRR MSR.
 */
struct fixed_mtrr_segment {
        u64 start;
        u64 end;

        int range_shift;

        /* the start position in kvm_mtrr.fixed_ranges[]. */
        int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
        /* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
        {
                .start = 0x0,
                .end = 0x80000,
                .range_shift = 16, /* 64K */
                .range_start = 0,
        },

        /*
         * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
         * 16K fixed mtrr.
         */
        {
                .start = 0x80000,
                .end = 0xc0000,
                .range_shift = 14, /* 16K */
                .range_start = 8,
        },

        /*
         * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
         * 4K fixed mtrr.
         */
        {
                .start = 0xc0000,
                .end = 0x100000,
                .range_shift = 12, /* 4K */
                .range_start = 24,
        }
};

/*
 * The size of the unit covered by one MSR: each MSR entry contains
 * 8 ranges, so the unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
        return 8 << fixed_seg_table[seg].range_shift;
}

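/*
 * Example of the mapping implemented by the helpers below:
 * MSR_MTRRfix16K_A0000 is unit 1 of segment 1; that unit spans
 * 8 << 14 == 128K, i.e. 0xa0000 - 0xbffff, and its eight 16K ranges are
 * stored at fixed_ranges[16] ... fixed_ranges[23].
 */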
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
        switch (msr) {
        case MSR_MTRRfix64K_00000:
                *seg = 0;
                *unit = 0;
                break;
        case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
                *seg = 1;
                *unit = msr - MSR_MTRRfix16K_80000;
                break;
        case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
                *seg = 2;
                *unit = msr - MSR_MTRRfix4K_C0000;
                break;
        default:
                return false;
        }

        return true;
}

static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
        struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
        u64 unit_size = fixed_mtrr_seg_unit_size(seg);

        *start = mtrr_seg->start + unit * unit_size;
        *end = *start + unit_size;
        WARN_ON(*end > mtrr_seg->end);
}

static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
        struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

        WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
                > mtrr_seg->end);

        /* each unit has 8 ranges. */
        return mtrr_seg->range_start + 8 * unit;
}

static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
        int seg, unit;

        if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
                return false;

        fixed_mtrr_seg_unit_range(seg, unit, start, end);
        return true;
}

static int fixed_msr_to_range_index(u32 msr)
{
        int seg, unit;

        if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
                return -1;

        return fixed_mtrr_seg_unit_range_index(seg, unit);
}

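/*
 * var_mtrr_range() turns a variable MTRR base/mask pair into [start, end).
 * Worked example (assuming 36 physical address bits): base = 0xf0000000,
 * mask = 0xff0000000 describe the 256MB range 0xf0000000 - 0xffffffff, so
 * *end = (*start | ~mask) + 1 == 0x100000000.
 */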
static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
        u64 mask;

        *start = range->base & PAGE_MASK;

        mask = range->mask & PAGE_MASK;
        mask |= ~0ULL << boot_cpu_data.x86_phys_bits;

        /*
         * This cannot overflow because writing to the reserved bits of
         * variable MTRRs causes a #GP.
         */
        *end = (*start | ~mask) + 1;
}

static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
        gfn_t start, end;
        int index;

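        /*
         * KVM only honours guest MTRRs for its own mappings when TDP is in
         * use and the guest has non-coherent DMA devices assigned; otherwise
         * the memory type it uses does not depend on the MTRRs and there is
         * nothing to zap.
         */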
        if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
            !kvm_arch_has_noncoherent_dma(vcpu->kvm))
                return;

        if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
                return;

        /* fixed MTRRs. */
        if (fixed_msr_to_range(msr, &start, &end)) {
                if (!fixed_mtrr_is_enabled(mtrr_state))
                        return;
        } else if (msr == MSR_MTRRdefType) {
                start = 0x0;
                end = ~0ULL;
        } else {
                /* variable range MTRRs. */
                index = (msr - 0x200) / 2;
                var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
        }

        kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

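/*
 * Variable-range MTRRs come in MSR pairs: 0x200 + 2n is MTRRphysBase_n and
 * 0x200 + 2n + 1 is MTRRphysMask_n, which is why the code below derives the
 * range index and the base/mask flag from (msr - 0x200).
 */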
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        int index;

        if (!kvm_mtrr_valid(vcpu, msr, data))
                return 1;

        index = fixed_msr_to_range_index(msr);
        if (index >= 0)
                *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
        else if (msr == MSR_MTRRdefType)
                vcpu->arch.mtrr_state.deftype = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
        else {  /* Variable MTRRs */
                int is_mtrr_mask;

                index = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * index;
                if (!is_mtrr_mask)
                        vcpu->arch.mtrr_state.var_ranges[index].base = data;
                else
                        vcpu->arch.mtrr_state.var_ranges[index].mask = data;
        }

        update_mtrr(vcpu, msr);
        return 0;
}

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        int index;

        /* MSR_MTRRcap is a readonly MSR. */
        if (msr == MSR_MTRRcap) {
                /*
                 * SMRR (bit 11)   = 0
                 * WC   (bit 10)   = 1
                 * FIX  (bit 8)    = 1
                 * VCNT (bits 7:0) = KVM_NR_VAR_MTRR
                 */
                *pdata = 0x500 | KVM_NR_VAR_MTRR;
                return 0;
        }

        if (!msr_mtrr_valid(msr))
                return 1;

        index = fixed_msr_to_range_index(msr);
        if (index >= 0)
                *pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
        else if (msr == MSR_MTRRdefType)
                *pdata = vcpu->arch.mtrr_state.deftype;
        else if (msr == MSR_IA32_CR_PAT)
                *pdata = vcpu->arch.pat;
        else {  /* Variable MTRRs */
                int is_mtrr_mask;

                index = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * index;
                if (!is_mtrr_mask)
                        *pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
                else
                        *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
        }

        return 0;
}

u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
        u64 base, mask, start;
        int i, num_var_ranges, type;
        const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
                               | (1 << MTRR_TYPE_WRTHROUGH);

        start = gfn_to_gpa(gfn);
        num_var_ranges = KVM_NR_VAR_MTRR;
        type = -1;

        /* MTRR is completely disabled, use UC for all of physical memory. */
        if (!mtrr_is_enabled(mtrr_state))
                return MTRR_TYPE_UNCACHABLE;

        /* Look in fixed ranges.  Just return the type as per start. */
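        /*
         * fixed_ranges[] layout: entries 0-7 cover 0x0-0x7ffff in 64K steps,
         * entries 8-23 cover 0x80000-0xbffff in 16K steps, and entries 24-87
         * cover 0xc0000-0xfffff in 4K steps, matching fixed_seg_table above.
         */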
        if (fixed_mtrr_is_enabled(mtrr_state) && (start < 0x100000)) {
                int idx;

                if (start < 0x80000) {
                        idx = 0;
                        idx += (start >> 16);
                        return mtrr_state->fixed_ranges[idx];
                } else if (start < 0xC0000) {
                        idx = 1 * 8;
                        idx += ((start - 0x80000) >> 14);
                        return mtrr_state->fixed_ranges[idx];
                } else if (start < 0x1000000) {
                        idx = 3 * 8;
                        idx += ((start - 0xC0000) >> 12);
                        return mtrr_state->fixed_ranges[idx];
                }
        }

        /*
         * Look in variable ranges.
         * Look for multiple ranges matching this address and pick the type
         * as per MTRR precedence.
         */
        for (i = 0; i < num_var_ranges; ++i) {
                int curr_type;

                if (!(mtrr_state->var_ranges[i].mask & (1 << 11)))
                        continue;

                base = mtrr_state->var_ranges[i].base & PAGE_MASK;
                mask = mtrr_state->var_ranges[i].mask & PAGE_MASK;

                if ((start & mask) != (base & mask))
                        continue;

                /*
                 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
                 * Precedences.
                 */

                curr_type = mtrr_state->var_ranges[i].base & 0xff;
                if (type == -1) {
                        type = curr_type;
                        continue;
                }

                /*
                 * If two or more variable memory ranges match and the
                 * memory types are identical, then that memory type is
                 * used.
                 */
                if (type == curr_type)
                        continue;

                /*
                 * If two or more variable memory ranges match and one of
                 * the memory types is UC, the UC memory type is used.
                 */
                if (curr_type == MTRR_TYPE_UNCACHABLE)
                        return MTRR_TYPE_UNCACHABLE;

                /*
                 * If two or more variable memory ranges match and the
                 * memory types are WT and WB, the WT memory type is used.
                 */
                if (((1 << type) & wt_wb_mask) &&
                    ((1 << curr_type) & wt_wb_mask)) {
                        type = MTRR_TYPE_WRTHROUGH;
                        continue;
                }

                /*
                 * For overlaps not defined by the above rules, processor
                 * behavior is undefined.
                 */

                /* We use WB for this undefined behavior. :( */
                return MTRR_TYPE_WRBACK;
        }

        if (type != -1)
                return type;

        return mtrr_default_type(mtrr_state);
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);