/*
 * arch/x86/kvm/paging_tmpl.h
 *
 * As of commit "KVM: MMU: Move kvm_free_some_pages() into critical section".
 */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

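/*
 * For illustration only -- a sketch of how the includer instantiates both
 * variants (mmu.c in this tree does essentially this; the exact surrounding
 * lines may differ):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * Each pass gives every FNAME() function a size-specific name, e.g.
 * FNAME(page_fault) becomes paging64_page_fault or paging32_page_fault.
 */
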
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};
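
/*
 * Example (illustrative, not from the original source): after a
 * successful 4-level 64-bit walk, table_gfn[3] holds the frame of the
 * topmost table and table_gfn[0] the frame of the last-level page
 * table; ptes[] holds the guest entry read at each level, and gfn is
 * the final guest frame the address maps to.
 */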

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

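/*
 * Guest accessed/dirty bit updates must not race with the guest (or
 * another vcpu) rewriting the same entry, so they go through a cmpxchg
 * on the mapped guest frame.  A true return value means the pte changed
 * under us and the caller should restart the walk.
 */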
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
				gfn_t table_gfn, unsigned index,
				pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);
	table = kmap_atomic(page, KM_USER0);

	ret = CMPXCHG(&table[index], orig_pte, new_pte);

	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
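
/*
 * Worked example (illustrative, assuming the usual layout in this tree
 * where ACC_WRITE_MASK and ACC_USER_MASK alias the hardware W and U
 * bits and ACC_EXEC_MASK is bit 0): a writable, user-accessible gpte
 * yields all three access bits; if the guest has NX enabled and the
 * gpte's NX bit is set, the shift above lands it on bit 0 and strips
 * exec permission.
 */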

/*
 * Fetch a guest pte for a guest virtual address
 */
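/*
 * Returns 1 and fills in *walker on success.  Returns 0 with
 * walker->error_code holding a #PF error code when the guest page
 * tables deny the access; the caller then reflects the fault into the
 * guest.
 */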
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			/* a racing guest write changed the pte; start over */
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __FUNCTION__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}

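/*
 * Called when the guest writes a pte inside a page table that is
 * shadowed here: the new guest pte is speculatively mapped into the
 * shadow page table so the next guest access through it need not fault.
 */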
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	struct page *npage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	npage = vcpu->arch.update_pte.page;
	if (!npage)
		return;
	get_page(npage);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
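/*
 * Walks the shadow page table from the root, allocating missing shadow
 * pages via kvm_mmu_get_page() on the way down, and finally installs
 * the leaf shadow pte with mmu_set_spte().  Returns a pointer to the
 * leaf shadow pte, or NULL if the guest pte is no longer present or
 * changed while we were installing it.
 */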
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite,
			 struct page *page)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	unsigned access = walker->pt_access;

	if (!is_present_pte(walker->ptes[walker->level - 1]))
		return NULL;

	shadow_addr = vcpu->arch.mmu.root_hpa;
	level = vcpu->arch.mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		bool new_page = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			if (!is_dirty_pte(walker->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, access,
					       shadow_ent, &new_page);
		if (new_page && !metaphysical) {
			int r;
			pt_element_t curr_pte;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  walker->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != walker->ptes[level - 2]) {
				kvm_release_page_clean(page);
				return NULL;
			}
		}
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
	}

	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
		     user_fault, write_fault,
		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
		     ptwrite, walker->gfn, page);

	return shadow_ent;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
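/*
 * Note the lock ordering below: mmap_sem is held for read across the
 * guest walk and gfn_to_page(), both of which may touch userspace
 * memory and sleep, while mmu_lock covers only the shadow table update.
 * kvm_mmu_free_some_pages() runs inside mmu_lock -- the change this
 * version of the file carries -- so shadow-page freeing cannot race
 * with the fetch.
 */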
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	struct page *page;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	down_read(&current->mm->mmap_sem);
	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		up_read(&current->mm->mmap_sem);
		return 0;
	}

	page = gfn_to_page(vcpu->kvm, walker.gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt, page);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (shadow_pte && is_io_pte(*shadow_pte)) {
		spin_unlock(&vcpu->kvm->mmu_lock);
		up_read(&current->mm->mmap_sem);
		return 1;
	}

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&current->mm->mmap_sem);

	return write_pt;
}

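/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest page tables; returns UNMAPPED_GVA if the address
 * is not mapped.
 */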
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

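/*
 * Pre-fill a freshly shadowed page's entries from the guest page table
 * it shadows: entries the guest marks present get
 * shadow_trap_nonpresent_pte (fault in and map on first use), while
 * absent entries get shadow_notrap_nonpresent_pte so a fault on them
 * can be reflected to the guest directly.
 */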
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, offset = 0, r = 0;
	pt_element_t pt;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
					  sizeof(pt_element_t));
		if (r || is_present_pte(pt))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	}
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG