Commit | Line | Data |
---|---|---|
ff53604b XG |
1 | /* |
2 | * vMTRR implementation | |
3 | * | |
4 | * Copyright (C) 2006 Qumranet, Inc. | |
5 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. | |
6 | * Copyright(C) 2015 Intel Corporation. | |
7 | * | |
8 | * Authors: | |
9 | * Yaniv Kamay <yaniv@qumranet.com> | |
10 | * Avi Kivity <avi@qumranet.com> | |
11 | * Marcelo Tosatti <mtosatti@redhat.com> | |
12 | * Paolo Bonzini <pbonzini@redhat.com> | |
13 | * Xiao Guangrong <guangrong.xiao@linux.intel.com> | |
14 | * | |
15 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
16 | * the COPYING file in the top-level directory. | |
17 | */ | |
18 | ||
19 | #include <linux/kvm_host.h> | |
20 | #include <asm/mtrr.h> | |
21 | ||
22 | #include "cpuid.h" | |
23 | #include "mmu.h" | |
24 | ||
10fac2dc XG |
25 | #define IA32_MTRR_DEF_TYPE_E (1ULL << 11) |
26 | #define IA32_MTRR_DEF_TYPE_FE (1ULL << 10) | |
27 | #define IA32_MTRR_DEF_TYPE_TYPE_MASK (0xff) | |
28 | ||
ff53604b XG |
29 | static bool msr_mtrr_valid(unsigned msr) |
30 | { | |
31 | switch (msr) { | |
32 | case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1: | |
33 | case MSR_MTRRfix64K_00000: | |
34 | case MSR_MTRRfix16K_80000: | |
35 | case MSR_MTRRfix16K_A0000: | |
36 | case MSR_MTRRfix4K_C0000: | |
37 | case MSR_MTRRfix4K_C8000: | |
38 | case MSR_MTRRfix4K_D0000: | |
39 | case MSR_MTRRfix4K_D8000: | |
40 | case MSR_MTRRfix4K_E0000: | |
41 | case MSR_MTRRfix4K_E8000: | |
42 | case MSR_MTRRfix4K_F0000: | |
43 | case MSR_MTRRfix4K_F8000: | |
44 | case MSR_MTRRdefType: | |
45 | case MSR_IA32_CR_PAT: | |
46 | return true; | |
47 | case 0x2f8: | |
48 | return true; | |
49 | } | |
50 | return false; | |
51 | } | |
52 | ||
/* Architecturally valid PAT encodings: UC(0), WC(1), WT(4), WP(5), WB(6), UC-(7). */
static bool valid_pat_type(unsigned t)
{
	switch (t) {
	case 0:
	case 1:
	case 4:
	case 5:
	case 6:
	case 7:
		return true;
	default:
		return false;
	}
}
57 | ||
/* Architecturally valid MTRR memory types: UC(0), WC(1), WT(4), WP(5), WB(6). */
static bool valid_mtrr_type(unsigned t)
{
	switch (t) {
	case 0:
	case 1:
	case 4:
	case 5:
	case 6:
		return true;
	default:
		return false;
	}
}
62 | ||
/*
 * Validate a guest write of @data to MTRR/PAT MSR @msr.
 *
 * Returns true if the value is acceptable.  Note the asymmetric fault
 * reporting: a reserved bit set in a variable-range MTRR injects #GP
 * directly here before returning false, while all other failures just
 * return false and leave fault injection to the caller.
 */
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		/* PAT holds 8 one-byte entries; each must be a valid PAT type. */
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		/* Only bits 0-7 (type), 10 (FE) and 11 (E) may be set. */
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		/* Fixed-range MTRRs pack 8 one-byte memory types per MSR. */
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	/* Bits above the guest's physical address width are reserved. */
	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		/* Bits 8-11 of a base register are reserved as well. */
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;
	if (data & mask) {
		kvm_inject_gp(vcpu, 0);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
107 | ||
10fac2dc XG |
108 | static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state) |
109 | { | |
110 | return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E); | |
111 | } | |
112 | ||
113 | static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state) | |
114 | { | |
115 | return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE); | |
116 | } | |
117 | ||
118 | static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state) | |
119 | { | |
120 | return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; | |
121 | } | |
122 | ||
de9aef5e XG |
/*
 * Three terms are used in the following code:
 * - segment, it indicates the address segments covered by fixed MTRRs.
 * - unit, it corresponds to the MSR entry in the segment.
 * - range, a range is covered in one memory cache type.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	/* log2 of the size in bytes of one range in this segment. */
	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K (2^12 bytes per range) */
		.range_start = 24,
	}
};
170 | ||
171 | /* | |
172 | * The size of unit is covered in one MSR, one MSR entry contains | |
173 | * 8 ranges so that unit size is always 8 * 2^range_shift. | |
174 | */ | |
175 | static u64 fixed_mtrr_seg_unit_size(int seg) | |
176 | { | |
177 | return 8 << fixed_seg_table[seg].range_shift; | |
178 | } | |
179 | ||
180 | static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit) | |
181 | { | |
182 | switch (msr) { | |
183 | case MSR_MTRRfix64K_00000: | |
184 | *seg = 0; | |
185 | *unit = 0; | |
186 | break; | |
187 | case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000: | |
188 | *seg = 1; | |
189 | *unit = msr - MSR_MTRRfix16K_80000; | |
190 | break; | |
191 | case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000: | |
192 | *seg = 2; | |
193 | *unit = msr - MSR_MTRRfix4K_C0000; | |
194 | break; | |
195 | default: | |
196 | return false; | |
197 | } | |
198 | ||
199 | return true; | |
200 | } | |
201 | ||
202 | static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end) | |
203 | { | |
204 | struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; | |
205 | u64 unit_size = fixed_mtrr_seg_unit_size(seg); | |
206 | ||
207 | *start = mtrr_seg->start + unit * unit_size; | |
208 | *end = *start + unit_size; | |
209 | WARN_ON(*end > mtrr_seg->end); | |
210 | } | |
211 | ||
212 | static int fixed_mtrr_seg_unit_range_index(int seg, int unit) | |
213 | { | |
214 | struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; | |
215 | ||
216 | WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg) | |
217 | > mtrr_seg->end); | |
218 | ||
219 | /* each unit has 8 ranges. */ | |
220 | return mtrr_seg->range_start + 8 * unit; | |
221 | } | |
222 | ||
223 | static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end) | |
224 | { | |
225 | int seg, unit; | |
226 | ||
227 | if (!fixed_msr_to_seg_unit(msr, &seg, &unit)) | |
228 | return false; | |
229 | ||
230 | fixed_mtrr_seg_unit_range(seg, unit, start, end); | |
231 | return true; | |
232 | } | |
233 | ||
234 | static int fixed_msr_to_range_index(u32 msr) | |
235 | { | |
236 | int seg, unit; | |
237 | ||
238 | if (!fixed_msr_to_seg_unit(msr, &seg, &unit)) | |
239 | return -1; | |
240 | ||
241 | return fixed_mtrr_seg_unit_range_index(seg, unit); | |
242 | } | |
243 | ||
ff53604b XG |
/*
 * React to a guest write of MTRR MSR @msr by zapping the covered GFN
 * range so the pages are remapped with the new memory type.
 */
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end, mask;
	int index;

	/*
	 * Only needed when TDP is enabled and the guest does noncoherent
	 * DMA; PAT writes never require a zap here.
	 */
	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	/*
	 * With MTRRs globally disabled, only a write to MTRRdefType
	 * (which may toggle the enable bit) can change effective types.
	 */
	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		/* The default type affects all of guest physical memory. */
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		start = mtrr_state->var_ranges[index].base & PAGE_MASK;
		mask = mtrr_state->var_ranges[index].mask & PAGE_MASK;
		/* Bits above the guest physical address width always match. */
		mask |= ~0ULL << cpuid_maxphyaddr(vcpu);

		/* End of the naturally aligned block selected by base/mask. */
		end = ((start & mask) | ~mask) + 1;
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}
276 | ||
/*
 * Emulate a guest WRMSR of @data to MTRR/PAT MSR @msr.
 *
 * Returns 0 on success, 1 if the MSR or value is invalid (the caller
 * injects #GP; kvm_mtrr_valid() may already have injected it for
 * reserved bits in a variable-range register).
 */
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		/* One fixed MSR stores 8 one-byte range types at once. */
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		/* Even MSRs (0x200 + 2n) are base, odd ones are mask. */
		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			vcpu->arch.mtrr_state.var_ranges[index].base = data;
		else
			vcpu->arch.mtrr_state.var_ranges[index].mask = data;
	}

	/* Zap mappings whose effective memory type may have changed. */
	update_mtrr(vcpu, msr);
	return 0;
}
305 | ||
/*
 * Emulate a guest RDMSR of MTRR/PAT MSR @msr into *@pdata.
 * Returns 0 on success, 1 if @msr is not a supported MTRR MSR.
 */
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		/* Read back all 8 one-byte range types of the fixed MSR. */
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		/* Even MSRs (0x200 + 2n) are base, odd ones are mask. */
		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
	}

	return 0;
}
345 | ||
/*
 * Compute the effective guest memory type of the page at @gfn,
 * applying the MTRR precedence rules of Intel SDM Vol. 3 11.11.4.1.
 * Returns one of the MTRR_TYPE_* values.
 */
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	u64 base, mask, start;
	int i, num_var_ranges, type;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	num_var_ranges = KVM_NR_VAR_MTRR;
	/* -1 means "no variable range has matched yet". */
	type = -1;

	/* MTRR is completely disabled, use UC for all of physical memory. */
	if (!mtrr_is_enabled(mtrr_state))
		return MTRR_TYPE_UNCACHABLE;

	/* Look in fixed ranges. Just return the type as per start */
	if (fixed_mtrr_is_enabled(mtrr_state) && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			/* 64K ranges: fixed_ranges[0..7]. */
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			/* 16K ranges: fixed_ranges[8..23]. */
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			/*
			 * 4K ranges: fixed_ranges[24..87].  The enclosing
			 * check already guarantees start < 0x100000, so
			 * the extra zero in this bound is harmless.
			 */
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look of multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	for (i = 0; i < num_var_ranges; ++i) {
		int curr_type;

		/* Skip ranges whose valid bit (bit 11 of the mask) is clear. */
		if (!(mtrr_state->var_ranges[i].mask & (1 << 11)))
			continue;

		base = mtrr_state->var_ranges[i].base & PAGE_MASK;
		mask = mtrr_state->var_ranges[i].mask & PAGE_MASK;

		/* Address is inside the range iff it matches base under mask. */
		if ((start & mask) != (base & mask))
			continue;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		curr_type = mtrr_state->var_ranges[i].base & 0xff;
		if (type == -1) {
			/* First matching range: take its type as-is. */
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		      ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (type != -1)
		return type;

	/* No variable range matched: fall back to the MTRRdefType type. */
	return mtrr_default_type(mtrr_state);
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);