KVM: Add mmu cache clear function
[linux-2.6-block.git] / drivers / kvm / kvm.h

#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include "vmx.h"
#include <linux/kvm.h>
#include <linux/kvm_para.h>

#define CR0_PE_MASK (1ULL << 0)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
#define CR0_NW_MASK (1ULL << 29)
#define CR0_CD_MASK (1ULL << 30)
#define CR0_PG_MASK (1ULL << 31)

#define CR3_WPT_MASK (1ULL << 3)
#define CR3_PCD_MASK (1ULL << 4)

#define CR3_RESEVED_BITS 0x07ULL
#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
#define CR3_FLAGS_MASK ((1ULL << 5) - 1)

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PSE_MASK (1ULL << 4)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PGE_MASK (1ULL << 7)
#define CR4_VMXE_MASK (1ULL << 13)

#define KVM_GUEST_CR0_MASK \
	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
	 | CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
#define KVM_GUEST_CR4_MASK \
	(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 1
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 256
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)

#define DE_VECTOR 0
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PIO_PAGE_OFFSET 1

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef unsigned long hfn_t;
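
/*
 * Illustrative sketch, not part of the original interface: a gfn is just
 * the page-frame portion of a gpa, so the two convert by shifting with
 * PAGE_SHIFT (hpa and hfn relate the same way on the host side).  The
 * helper name below is hypothetical.
 */
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}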

#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:18 - "access" - the user and writable bits of a huge page pde
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 2;
	};
};
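
/*
 * Illustrative sketch only (the real construction lives in the mmu code,
 * and this helper name is hypothetical): a role describes one shadow page.
 * role.word is cleared first so the unused pad bits compare equal, letting
 * (gfn, role.word) serve as the shadow-page hash key.
 */
static inline union kvm_mmu_page_role example_mmu_page_role(unsigned glevels,
							    unsigned level)
{
	union kvm_mmu_page_role role;

	role.word = 0;		/* zero padding so whole-word compares work */
	role.glevels = glevels;	/* guest paging depth, e.g. 4 in long mode */
	role.level = level;	/* level of this shadow page table, 1-4 */
	return role;
}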

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	hpa_t page_hpa;
	unsigned long slot_bitmap;	/* One bit set per slot which has memory
					 * in this shadow page.
					 */
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
};

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

#define vmx_msr_entry kvm_msr_entry

struct kvm_vcpu;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};
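
/*
 * Illustrative usage (not a declaration): callers do not test the paging
 * mode themselves; they go through the callbacks installed for the current
 * mode, e.g.
 *
 *	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 *
 * Switching modes (kvm_mmu_reset_context(), declared below) installs the
 * callback set for the new mode.
 */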

#define KVM_NR_MEM_OBJS 20

struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
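
/*
 * Illustrative sketch only -- the real fill/consume helpers live in the mmu
 * code and this name is hypothetical.  The idea: the cache is topped up
 * before fault handling starts, so taking an object later can never fail.
 */
static inline void *example_mmu_memory_cache_pop(struct kvm_mmu_memory_cache *mc)
{
	/* assumes the cache was refilled beforehand, so nobjs > 0 */
	return mc->objects[--mc->nobjs];
}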

struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

enum {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_CS,
	VCPU_SREG_DS,
	VCPU_SREG_ES,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_SS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	struct page *guest_pages[2];
	unsigned guest_page_offset;
	int in;
	int size;
	int string;
	int down;
	int rep;
};

struct kvm_vcpu {
	struct kvm *kvm;
	union {
		struct vmcs *vmcs;
		struct vcpu_svm *svm;
	};
	struct mutex mutex;
	int cpu;
	int launched;
	u64 host_tsc;
	struct kvm_run *run;
	int interrupt_window_open;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
	unsigned long irq_pending[NR_IRQ_WORDS];
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;	/* needs vcpu_load_rsp_rip() */

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	gpa_t para_state_gpa;
	struct page *para_state_page;
	gpa_t hypercall_gpa;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	u64 ia32_misc_enable_msr;
	int nmsrs;
	struct vmx_msr_entry *guest_msrs;
	struct vmx_msr_entry *host_msrs;

	struct list_head free_pages;
	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;

	struct kvm_guest_debug guest_debug;

	char fx_buf[FX_BUF_SIZE];
	char *host_fx_image;
	char *guest_fx_image;

	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
	struct kvm_pio_request pio;
	void *pio_data;

	int sigset_active;
	sigset_t sigset;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;

	int cpuid_nent;
	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	struct page **phys_mem;
	unsigned long *dirty_bitmap;
};

struct kvm {
	spinlock_t lock; /* protects everything except vcpus */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	int n_free_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
	int memory_config_version;
	int busy;
	unsigned long rmap_overflow;
	struct list_head vm_list;
	struct file *filp;
};

struct kvm_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 request_irq_exits;
	u32 irq_exits;
};

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

struct kvm_arch_ops {
	int (*cpu_has_kvm_support)(void); /* __init */
	int (*disabled_by_bios)(void); /* __init */
	void (*hardware_enable)(void *dummy); /* __init */
	void (*hardware_disable)(void *dummy);
	int (*hardware_setup)(void); /* __init */
	void (*hardware_unsetup)(void); /* __exit */

	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);

	void (*vcpu_load)(struct kvm_vcpu *vcpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
};

extern struct kvm_stat kvm_stat;
extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);
void kvm_mmu_zap_all(struct kvm_vcpu *vcpu);

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->phys_mem[gfn - slot->base_gfn];
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

enum emulation_result {
	EMULATE_DONE,    /* no further processing */
	EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
	EMULATE_FAIL,    /* can't emulate this instruction */
};

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);

struct x86_emulate_ctxt;

int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		  int size, unsigned long count, int string, int down,
		  gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void load_msrs(struct vmx_msr_entry *e, int n);
void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);

int kvm_read_guest(struct kvm_vcpu *vcpu,
		   gva_t addr,
		   unsigned long size,
		   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
		    gva_t addr,
		    unsigned long size,
		    void *data);

unsigned long segment_base(u16 selector);

void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
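
/*
 * Page-fault entry point: if the pool of free shadow pages has dropped
 * below KVM_MIN_FREE_MMU_PAGES, reclaim some before dispatching to the
 * handler for the current paging mode.
 */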
static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				     u32 error_code)
{
	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		kvm_mmu_free_some_pages(vcpu);
	return vcpu->mmu.page_fault(vcpu, gva, error_code);
}

static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return slot ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->shadow_efer & EFER_LME;
#else
	return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PAE_MASK;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & CR4_PSE_MASK;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_PG_MASK;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}
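
/*
 * Map a shadow page table's host physical address back to its
 * struct kvm_mmu_page.  This relies on the mmu code storing the header
 * pointer in the underlying struct page's private field when the shadow
 * page is allocated.
 */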
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
	u16 seg;
	asm ("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm ("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm ("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm ("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
	asm ("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
	asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm ("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void fx_save(void *image)
{
	asm ("fxsave (%0)" : : "r" (image));
}

static inline void fx_restore(void *image)
{
	asm ("fxrstor (%0)" : : "r" (image));
}

static inline void fpu_init(void)
{
	asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif