/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/page.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include "vmx.h"
#include "kvm.h"

#define pgprintk(x...) do { printk(x); } while (0)
#define rmap_printk(x...) do { printk(x); } while (0)

/* Wrapped in do-while so the macro expands to a single statement. */
#define ASSERT(x)                                                       \
        do {                                                            \
                if (!(x)) {                                             \
                        printk(KERN_WARNING                             \
                               "assertion failed %s:%d: %s\n",          \
                               __FILE__, __LINE__, #x);                 \
                }                                                       \
        } while (0)

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)   /* PS bit, in directory entries */
#define PT_PAT_MASK (1ULL << 7)         /* PAT bit, in page-table entries */
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT32_PTE_COPY_MASK \
        (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)

#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

/*
 * The PS and IO marks share software-available bit 9; this is apparently
 * safe because the PS mark is only used in shadow directory entries and
 * the IO mark only in leaf shadow ptes.
 */
#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)

#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
#define PT_SHADOW_USER_MASK (1ULL << PT_SHADOW_USER_SHIFT)

#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
        (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
        (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level) \
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
        (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
        (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level) \
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

/*
 * One link in a reverse-map chain: up to RMAP_EXT shadow pte pointers,
 * plus a pointer to the next descriptor when a page has more mappings.
 */
struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
};

static int is_write_protection(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & CR0_WP_MASK;
}

/* PSE36 support is simply assumed; cpuid is not actually queried. */
static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_present_pte(unsigned long pte)
{
        return pte & PT_PRESENT_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static int is_io_pte(unsigned long pte)
{
        return pte & PT_SHADOW_IO_MARK;
}

/* Only present, writable ptes participate in the reverse map. */
static int is_rmap_pte(u64 pte)
{
        return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
                == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}

/*
 * Reverse mapping data structures:
 *
 * If page->private bit zero is zero, then page->private points to the
 * shadow page table entry that points to page_address(page).
 *
 * If page->private bit zero is one, (then page->private & ~1) points
 * to a struct kvm_rmap_desc containing more mappings.
 */

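/*
 * rmap_add() records that *spte now maps the guest page it points at.
 * The first mapping is stored directly in page->private; a second
 * mapping promotes page->private to a kvm_rmap_desc chain, and further
 * mappings fill (or extend) that chain.
 */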
static void rmap_add(struct kvm *kvm, u64 *spte)
{
        struct page *page;
        struct kvm_rmap_desc *desc;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        if (!page->private) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                page->private = (unsigned long)spte;
        } else if (!(page->private & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = kzalloc(sizeof *desc, GFP_NOWAIT);
                if (!desc)
                        BUG(); /* FIXME: return error */
                desc->shadow_ptes[0] = (u64 *)page->private;
                desc->shadow_ptes[1] = spte;
                page->private = (unsigned long)desc | 1;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
                        desc = desc->more;
                if (desc->shadow_ptes[RMAP_EXT-1]) {
                        desc->more = kzalloc(sizeof *desc->more, GFP_NOWAIT);
                        if (!desc->more)
                                BUG(); /* FIXME: return error */
                        desc = desc->more;
                }
                for (i = 0; desc->shadow_ptes[i]; ++i)
                        ;
                desc->shadow_ptes[i] = spte;
        }
}

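/*
 * Remove entry i of @desc from the chain by overwriting it with the
 * last live entry of the same descriptor; if that empties the
 * descriptor, unlink it from page->private (or from @prev_desc) and
 * free it.
 */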
static void rmap_desc_remove_entry(struct page *page,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
{
        int j;

        for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
                ;
        desc->shadow_ptes[i] = desc->shadow_ptes[j];
        desc->shadow_ptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                page->private = (unsigned long)desc->shadow_ptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        page->private = (unsigned long)desc->more | 1;
        kfree(desc);
}

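/*
 * rmap_remove() undoes rmap_add() for *spte, collapsing a single-entry
 * descriptor chain back into a direct page->private pointer.  It is a
 * BUG to remove an spte that was never added.
 */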
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
        struct page *page;
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        if (!page->private) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
        } else if (!(page->private & 1)) {
                rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
                if ((u64 *)page->private != spte) {
                        printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
                page->private = 0;
        } else {
                rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                                if (desc->shadow_ptes[i] == spte) {
                                        rmap_desc_remove_entry(page, desc, i,
                                                               prev_desc);
                                        return;
                                }
                        prev_desc = desc;
                        desc = desc->more;
                }
                BUG();
        }
}

static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
{
        struct kvm_mmu_page *page_head = page_header(page_hpa);

        list_del(&page_head->link);
        page_head->page_hpa = page_hpa;
        list_add(&page_head->link, &vcpu->free_pages);
}

static int is_empty_shadow_page(hpa_t page_hpa)
{
        u32 *pos;
        u32 *end;

        for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u32);
             pos != end; pos++)
                if (*pos != 0)
                        return 0;
        return 1;
}

/* Trivial hash: shadow pages are hashed on the low bits of the gfn. */
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
        return gfn;
}

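/*
 * Take a pre-allocated shadow page off the vcpu free list and move it
 * to the kvm-wide active list.  Returns NULL when the free list is
 * exhausted; callers respond by flushing and rebuilding the roots.
 */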
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte)
{
        struct kvm_mmu_page *page;

        if (list_empty(&vcpu->free_pages))
                return NULL;

        page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
        list_del(&page->link);
        list_add(&page->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->page_hpa));
        page->slot_bitmap = 0;
        page->global = 1;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
        return page;
}

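/*
 * Shadow pages are reachable from one or more parent sptes.  A single
 * parent is kept inline in page->parent_pte; once a second parent
 * appears the page is flagged multimapped and the parents move into a
 * chain of kvm_pte_chain blocks hanging off page->parent_ptes.
 */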
static void mmu_page_add_parent_pte(struct kvm_mmu_page *page, u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!parent_pte)
                return;
        if (!page->multimapped) {
                u64 *old = page->parent_pte;

                if (!old) {
                        page->parent_pte = parent_pte;
                        return;
                }
                page->multimapped = 1;
                pte_chain = kzalloc(sizeof(struct kvm_pte_chain), GFP_NOWAIT);
                BUG_ON(!pte_chain);
                INIT_HLIST_HEAD(&page->parent_ptes);
                hlist_add_head(&pte_chain->link, &page->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        }
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                        continue;
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
                        if (!pte_chain->parent_ptes[i]) {
                                pte_chain->parent_ptes[i] = parent_pte;
                                return;
                        }
        }
        pte_chain = kzalloc(sizeof(struct kvm_pte_chain), GFP_NOWAIT);
        BUG_ON(!pte_chain);
        hlist_add_head(&pte_chain->link, &page->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
                                       u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!page->multimapped) {
                BUG_ON(page->parent_pte != parent_pte);
                page->parent_pte = NULL;
                return;
        }
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        if (pte_chain->parent_ptes[i] != parent_pte)
                                continue;
                        while (i + 1 < NR_PTE_CHAIN_ENTRIES) {
                                pte_chain->parent_ptes[i]
                                        = pte_chain->parent_ptes[i + 1];
                                ++i;
                        }
                        pte_chain->parent_ptes[i] = NULL;
                        return;
                }
        BUG();
}

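/*
 * Look up the shadow page that shadows the guest page table at @gfn,
 * skipping "metaphysical" pages, which shadow no guest table.
 */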
static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
                                                gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: found role %x\n",
                                 __FUNCTION__, page->role.word);
                        return page;
                }
        return NULL;
}

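/*
 * Find or create the shadow page for (@gfn, role).  The role encodes
 * the guest paging depth, the shadow level and, when one 32-bit guest
 * table must be shadowed by several narrower 64-bit-format tables,
 * which quadrant of the guest table this shadow page covers.
 */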
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
                                             u64 *parent_pte)
{
        union kvm_mmu_page_role role;
        unsigned index;
        unsigned quadrant;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        role.word = 0;
        role.glevels = vcpu->mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
        if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
        pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && page->role.word == role.word) {
                        mmu_page_add_parent_pte(page, parent_pte);
                        pgprintk("%s: found\n", __FUNCTION__);
                        return page;
                }
        page = kvm_mmu_alloc_page(vcpu, parent_pte);
        if (!page)
                return page;
        pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
        page->gfn = gfn;
        page->role = role;
        hlist_add_head(&page->hash_link, bucket);
        return page;
}

static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
                             struct kvm_mmu_page *page,
                             u64 *parent_pte)
{
        mmu_page_remove_parent_pte(page, parent_pte);
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
        int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
        struct kvm_mmu_page *page_head = page_header(__pa(pte));

        __set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        hpa_t hpa = gpa_to_hpa(vcpu, gpa);

        return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        struct kvm_memory_slot *slot;
        struct page *page;

        ASSERT((gpa & HPA_ERR_MASK) == 0);
        slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
        if (!slot)
                return gpa | HPA_ERR_MASK;
        page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
                | (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return UNMAPPED_GVA;
        return gpa_to_hpa(vcpu, gpa);
}

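/*
 * Recursively tear down the shadow page table rooted at @page_hpa:
 * interior levels recurse, leaf levels drop their reverse-map entries,
 * every entry is cleared, and the page itself returns to the free list.
 */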
static void release_pt_page_64(struct kvm_vcpu *vcpu, hpa_t page_hpa,
                               int level)
{
        u64 *pos;
        u64 *end;

        ASSERT(vcpu);
        ASSERT(VALID_PAGE(page_hpa));
        ASSERT(level <= PT64_ROOT_LEVEL && level > 0);

        for (pos = __va(page_hpa), end = pos + PT64_ENT_PER_PAGE;
             pos != end; pos++) {
                u64 current_ent = *pos;

                if (is_present_pte(current_ent)) {
                        if (level != 1)
                                release_pt_page_64(vcpu,
                                                   current_ent &
                                                   PT64_BASE_ADDR_MASK,
                                                   level - 1);
                        else
                                rmap_remove(vcpu->kvm, pos);
                }
                *pos = 0;
        }
        kvm_mmu_free_page(vcpu, page_hpa);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

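/*
 * Map guest address @v to host physical page @p while the guest has
 * paging disabled.  Walks the shadow table from the root, allocating
 * missing intermediate levels as metaphysical pages, and installs a
 * writable user pte at the bottom.
 */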
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
        int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->mmu.root_hpa;

        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
                u64 pte;

                ASSERT(VALID_PAGE(table_addr));
                table = __va(table_addr);

                if (level == 1) {
                        pte = table[index];
                        if (is_present_pte(pte) && is_writeble_pte(pte))
                                return 0;
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table, v);
                        table[index] = p | PT_PRESENT_MASK |
                                       PT_WRITABLE_MASK | PT_USER_MASK;
                        rmap_add(vcpu->kvm, &table[index]);
                        return 0;
                }

                if (table[index] == 0) {
                        struct kvm_mmu_page *new_table;
                        gfn_t pseudo_gfn;

                        pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
                                                     1, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
                        }

                        table[index] = new_table->page_hpa | PT_PRESENT_MASK
                                | PT_WRITABLE_MASK | PT_USER_MASK;
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
        }
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
        int i;

#ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                ASSERT(VALID_PAGE(root));
                vcpu->mmu.root_hpa = INVALID_PAGE;
                return;
        }
#endif
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                ASSERT(VALID_PAGE(root));
                root &= PT64_BASE_ADDR_MASK;
                vcpu->mmu.pae_root[i] = INVALID_PAGE;
        }
        vcpu->mmu.root_hpa = INVALID_PAGE;
}

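/*
 * Install shadow roots: a single PML4-level root for 64-bit guests,
 * otherwise four PAE directory roots, one per gigabyte of guest
 * address space.  Non-paging guests get metaphysical roots keyed by
 * pseudo-gfn rather than by guest cr3/pdptr values.
 */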
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
        int i;
        gfn_t root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                ASSERT(!VALID_PAGE(root));
                root = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                        PT64_ROOT_LEVEL, 0, NULL)->page_hpa;
                vcpu->mmu.root_hpa = root;
                return;
        }
#endif
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                ASSERT(!VALID_PAGE(root));
                if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
                        root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
                else if (vcpu->mmu.root_level == 0)
                        root_gfn = 0;
                root = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                        PT32_ROOT_LEVEL, !is_paging(vcpu),
                                        NULL)->page_hpa;
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static void nonpaging_flush(struct kvm_vcpu *vcpu)
{
        hpa_t root = vcpu->mmu.root_hpa;

        ++kvm_stat.tlb_flush;
        pgprintk("nonpaging_flush\n");
        mmu_free_roots(vcpu);
        mmu_alloc_roots(vcpu);
        kvm_arch_ops->set_cr3(vcpu, root);
        kvm_arch_ops->tlb_flush(vcpu);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
        return vaddr;
}

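/*
 * Handle a fault while the guest has paging disabled: translate the
 * address 1:1, map it, and retry after flushing if the shadow page
 * pool has run dry.  Returns 1 when the address is not backed by a
 * memory slot (mmio).
 */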
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                u32 error_code)
{
        int ret;
        gpa_t addr = gva;

        ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

        for (;;) {
                hpa_t paddr;

                paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

                if (is_error_hpa(paddr))
                        return 1;

                ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
                if (ret) {
                        nonpaging_flush(vcpu);
                        continue;
                }
                break;
        }
        return ret;
}

static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
{
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
        context->inval_page = nonpaging_inval_page;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
        return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
        ++kvm_stat.tlb_flush;
        kvm_arch_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
        mmu_alloc_roots(vcpu);
        kvm_mmu_flush_tlb(vcpu);
        kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
}

static void mark_pagetable_nonglobal(void *shadow_pte)
{
        page_header(__pa(shadow_pte))->global = 0;
}

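/*
 * Fill in the access/mapping half of a shadow pte.  The guest access
 * bits are mirrored into the shadow-only bit range, write access is
 * withheld for clean pages and for pages that are themselves shadowed
 * guest page tables (so writes to them can be trapped), and unbacked
 * addresses are flagged as mmio with PT_SHADOW_IO_MARK.
 */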
static inline void set_pte_common(struct kvm_vcpu *vcpu,
                                  u64 *shadow_pte,
                                  gpa_t gaddr,
                                  int dirty,
                                  u64 access_bits)
{
        hpa_t paddr;

        *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
        if (!dirty)
                access_bits &= ~PT_WRITABLE_MASK;
        if (access_bits & PT_WRITABLE_MASK) {
                struct kvm_mmu_page *shadow;

                shadow = kvm_mmu_lookup_page(vcpu, gaddr >> PAGE_SHIFT);
                if (shadow) {
                        pgprintk("%s: found shadow page for %lx, marking ro\n",
                                 __FUNCTION__, (gfn_t)(gaddr >> PAGE_SHIFT));
                        access_bits &= ~PT_WRITABLE_MASK;
                }
        }

        if (access_bits & PT_WRITABLE_MASK)
                mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

        *shadow_pte |= access_bits;

        paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

        if (!(*shadow_pte & PT_GLOBAL_MASK))
                mark_pagetable_nonglobal(shadow_pte);

        if (is_error_hpa(paddr)) {
                *shadow_pte |= gaddr;
                *shadow_pte |= PT_SHADOW_IO_MARK;
                *shadow_pte &= ~PT_PRESENT_MASK;
        } else {
                *shadow_pte |= paddr;
                page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
                rmap_add(vcpu->kvm, shadow_pte);
        }
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
                              u64 addr,
                              u32 err_code)
{
        kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}

static inline int fix_read_pf(u64 *shadow_ent)
{
        if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
            !(*shadow_ent & PT_USER_MASK)) {
                /*
                 * If supervisor write protect is disabled, we shadow kernel
                 * pages as user pages so we can trap the write access.
                 */
                *shadow_ent |= PT_USER_MASK;
                *shadow_ent &= ~PT_WRITABLE_MASK;

                return 1;
        }
        return 0;
}

static int may_access(u64 pte, int write, int user)
{
        if (user && !(pte & PT_USER_MASK))
                return 0;
        if (write && !(pte & PT_WRITABLE_MASK))
                return 0;
        return 1;
}

/*
 * Remove the shadow pte mapping guest virtual address @addr, dropping
 * a whole shadowed large page if one covers the address.
 */
static void paging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
{
        hpa_t page_addr = vcpu->mmu.root_hpa;
        int level = vcpu->mmu.shadow_root_level;

        ++kvm_stat.invlpg;

        for (; ; level--) {
                u32 index = PT64_INDEX(addr, level);
                u64 *table = __va(page_addr);

                if (level == PT_PAGE_TABLE_LEVEL) {
                        rmap_remove(vcpu->kvm, &table[index]);
                        table[index] = 0;
                        return;
                }

                if (!is_present_pte(table[index]))
                        return;

                page_addr = table[index] & PT64_BASE_ADDR_MASK;

                if (level == PT_DIRECTORY_LEVEL &&
                    (table[index] & PT_SHADOW_PS_MARK)) {
                        table[index] = 0;
                        release_pt_page_64(vcpu, page_addr,
                                           PT_PAGE_TABLE_LEVEL);

                        kvm_arch_ops->tlb_flush(vcpu);
                        return;
                }
        }
}

static void paging_free(struct kvm_vcpu *vcpu)
{
        nonpaging_free(vcpu);
}

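/*
 * paging_tmpl.h is a template: it is included twice, with PTTYPE
 * selecting the guest pte width, to generate the paging64_* and
 * paging32_* walker/fault functions used below.
 */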
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
        struct kvm_mmu *context = &vcpu->mmu;

        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
        context->inval_page = paging_inval_page;
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
        mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
                              (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
        return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
        return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->inval_page = paging_inval_page;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
                              (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
        return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
        return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

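/*
 * Pick the mmu flavour matching the guest paging mode: none, 64-bit
 * long mode, PAE, or classic 32-bit paging.
 */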
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
        else if (is_long_mode(vcpu))
                return paging64_init_context(vcpu);
        else if (is_pae(vcpu))
                return paging32E_init_context(vcpu);
        else
                return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        if (VALID_PAGE(vcpu->mmu.root_hpa)) {
                vcpu->mmu.free(vcpu);
                vcpu->mmu.root_hpa = INVALID_PAGE;
        }
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
        destroy_kvm_mmu(vcpu);
        return init_kvm_mmu(vcpu);
}

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
        while (!list_empty(&vcpu->free_pages)) {
                struct kvm_mmu_page *page;

                page = list_entry(vcpu->free_pages.next,
                                  struct kvm_mmu_page, link);
                list_del(&page->link);
                __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
                page->page_hpa = INVALID_PAGE;
        }
        free_page((unsigned long)vcpu->mmu.pae_root);
}

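/*
 * Pre-allocate the fixed pool of shadow pages for this vcpu, plus the
 * PAE root directory.  Shadow pages are taken from this pool at fault
 * time, so the fault path itself never has to allocate.
 */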
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct page *page;
        int i;

        ASSERT(vcpu);

        for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
                struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];

                INIT_LIST_HEAD(&page_header->link);
                page = alloc_page(GFP_KERNEL);
                if (!page)
                        goto error_1;
                page->private = (unsigned long)page_header;
                page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
                memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
                list_add(&page_header->link, &vcpu->free_pages);
        }

        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                goto error_1;
        vcpu->mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
                vcpu->mmu.pae_root[i] = INVALID_PAGE;

        return 0;

error_1:
        free_mmu_pages(vcpu);
        return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
        ASSERT(list_empty(&vcpu->free_pages));

        return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
        ASSERT(!list_empty(&vcpu->free_pages));

        return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);

        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
}

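/*
 * Write-protect every shadow pte that maps memory slot @slot, so that
 * subsequent guest writes fault (e.g. for dirty-page logging).  Since
 * the reverse map only tracks writable ptes, each pte also leaves the
 * rmap as it is write-protected.
 */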
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
        struct kvm_mmu_page *page;

        list_for_each_entry(page, &kvm->active_mmu_pages, link) {
                int i;
                u64 *pt;

                if (!test_bit(slot, &page->slot_bitmap))
                        continue;

                pt = __va(page->page_hpa);
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (pt[i] & PT_WRITABLE_MASK) {
                                rmap_remove(kvm, &pt[i]);
                                pt[i] &= ~PT_WRITABLE_MASK;
                        }
        }
}