KVM: Use standard CR3 flags, tighten checking
drivers/kvm/kvm.h

#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/signal.h>

#include "vmx.h"
#include <linux/kvm.h>
#include <linux/kvm_para.h>

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PSE_MASK (1ULL << 4)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PGE_MASK (1ULL << 7)
#define CR4_VMXE_MASK (1ULL << 13)

#define KVM_GUEST_CR0_MASK \
        (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
         | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
        (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
         | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
        (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)

#define DE_VECTOR 0
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PIO_PAGE_OFFSET 1

/*
 * vcpu->requests bit members
 */
#define KVM_TLB_FLUSH 0

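/*
 * Example (illustrative sketch, not a new interface): a request bit is raised
 * with the generic bitops and consumed on the next guest entry, roughly
 *
 *      set_bit(KVM_TLB_FLUSH, &vcpu->requests);
 *      ...
 *      if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
 *              kvm_arch_ops->tlb_flush(vcpu);
 */
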
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef unsigned long  gfn_t;

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef unsigned long  hfn_t;

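/*
 * Example (illustrative): a frame number is the corresponding address with the
 * page offset stripped, so conversions are plain shifts:
 *
 *      gfn_t gfn = gpa >> PAGE_SHIFT;
 *      gpa_t gpa = (gpa_t)gfn << PAGE_SHIFT;
 */
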
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
        u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
        struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned glevels : 4;
                unsigned level : 4;
                unsigned quadrant : 2;
                unsigned pad_for_nice_hex_output : 6;
                unsigned metaphysical : 1;
                unsigned hugepage_access : 3;
        };
};

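/*
 * Example (illustrative): a shadow page describing a level-1 page table for a
 * 2-level (non-PAE) guest, covering quadrant 1, would carry the role
 *
 *      role.glevels = 2, role.level = 1, role.quadrant = 1,
 *      role.metaphysical = 0
 *
 * and it is the packed role.word, together with gfn, that keys the shadow
 * page hash lookup below.
 */
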
struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        u64 *spt;
        unsigned long slot_bitmap; /* One bit set per slot which has memory
                                    * in this shadow page.
                                    */
        int multimapped;           /* More than one parent_pte? */
        int root_count;            /* Currently serving as active root */
        union {
                u64 *parent_pte;               /* !multimapped */
                struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
        };
};

struct vmcs {
        u32 revision_id;
        u32 abort;
        char data[0];
};

#define vmx_msr_entry kvm_msr_entry

struct kvm_vcpu;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*free)(struct kvm_vcpu *vcpu);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;

        u64 *pae_root;
};

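/*
 * Example (illustrative sketch): callers do not inspect the paging mode
 * directly, they go through the callbacks, e.g.
 *
 *      gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 *      if (gpa == UNMAPPED_GVA)
 *              ...  the guest page tables do not map gva ...
 */
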
#define KVM_NR_MEM_OBJS 20

struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
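/*
 * Illustrative sketch (assumption; the real helpers live in mmu.c): the fault
 * path tops each cache up beforehand, so the shadow walk can pull objects out
 * of it without being able to fail, roughly
 *
 *      void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *      {
 *              return mc->objects[--mc->nobjs];
 *      }
 */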
struct kvm_guest_debug {
        int enabled;
        unsigned long bp[4];
        int singlestep;
};

enum {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        NR_VCPU_REGS
};

enum {
        VCPU_SREG_CS,
        VCPU_SREG_DS,
        VCPU_SREG_ES,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_SS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

struct kvm_pio_request {
        unsigned long count;
        int cur_count;
        struct page *guest_pages[2];
        unsigned guest_page_offset;
        int in;
        int port;
        int size;
        int string;
        int down;
        int rep;
};

struct kvm_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 halt_exits;
        u32 request_irq_exits;
        u32 irq_exits;
        u32 light_exits;
        u32 efer_reload;
};

struct kvm_io_device {
        void (*read)(struct kvm_io_device *this,
                     gpa_t addr,
                     int len,
                     void *val);
        void (*write)(struct kvm_io_device *this,
                      gpa_t addr,
                      int len,
                      const void *val);
        int (*in_range)(struct kvm_io_device *this, gpa_t addr);
        void (*destructor)(struct kvm_io_device *this);

        void *private;
};

static inline void kvm_iodevice_read(struct kvm_io_device *dev,
                                     gpa_t addr,
                                     int len,
                                     void *val)
{
        dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
                                      gpa_t addr,
                                      int len,
                                      const void *val)
{
        dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
        return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
        if (dev->destructor)
                dev->destructor(dev);
}

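/*
 * Example (illustrative, hypothetical device): an implementation provides the
 * callbacks above and hangs its own state off ->private, e.g.
 *
 *      static struct kvm_io_device mydev = {
 *              .read     = mydev_read,
 *              .write    = mydev_write,
 *              .in_range = mydev_in_range,
 *              .private  = &mydev_state,
 *      };
 *
 * (mydev_* are hypothetical names; real devices register on the mmio or pio
 * bus declared below.)
 */
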
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice. At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
        int dev_count;
#define NR_IOBUS_DEVS 6
        struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
                             struct kvm_io_device *dev);

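/*
 * Sketch of the linear search described above (illustrative only; the actual
 * implementation lives in kvm_main.c):
 *
 *      struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
 *                                                gpa_t addr)
 *      {
 *              int i;
 *
 *              for (i = 0; i < bus->dev_count; i++)
 *                      if (kvm_iodevice_inrange(bus->devs[i], addr))
 *                              return bus->devs[i];
 *              return NULL;
 *      }
 */
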
struct kvm_vcpu {
        struct kvm *kvm;
        int vcpu_id;
        union {
                struct vmcs *vmcs;
                struct vcpu_svm *svm;
        };
        struct mutex mutex;
        int cpu;
        int launched;
        u64 host_tsc;
        struct kvm_run *run;
        int interrupt_window_open;
        int guest_mode;
        unsigned long requests;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
        unsigned long irq_pending[NR_IRQ_WORDS];
        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
        unsigned long rip;      /* needs vcpu_load_rsp_rip() */

        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
        gpa_t para_state_gpa;
        struct page *para_state_page;
        gpa_t hypercall_gpa;
        unsigned long cr4;
        unsigned long cr8;
        u64 pdptrs[4]; /* pae */
        u64 shadow_efer;
        u64 apic_base;
        u64 ia32_misc_enable_msr;
        int nmsrs;
        int save_nmsrs;
        int msr_offset_efer;
#ifdef CONFIG_X86_64
        int msr_offset_kernel_gs_base;
#endif
        struct vmx_msr_entry *guest_msrs;
        struct vmx_msr_entry *host_msrs;

        struct kvm_mmu mmu;

        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        gfn_t last_pt_write_gfn;
        int last_pt_write_count;

        struct kvm_guest_debug guest_debug;

        char fx_buf[FX_BUF_SIZE];
        char *host_fx_image;
        char *guest_fx_image;
        int fpu_active;
        int guest_fpu_loaded;
        struct vmx_host_state {
                int loaded;
                u16 fs_sel, gs_sel, ldt_sel;
                int fs_gs_ldt_reload_needed;
        } vmx_host_state;

        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;
        gva_t mmio_fault_cr2;
        struct kvm_pio_request pio;
        void *pio_data;

        int sigset_active;
        sigset_t sigset;

        struct kvm_stat stat;

        struct {
                int active;
                u8 save_iopl;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } tr, es, ds, fs, gs;
        } rmode;
        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
};

struct kvm_mem_alias {
        gfn_t base_gfn;
        unsigned long npages;
        gfn_t target_gfn;
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        struct page **phys_mem;
        unsigned long *dirty_bitmap;
};

struct kvm {
        spinlock_t lock; /* protects everything except vcpus */
        int naliases;
        struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct list_head active_mmu_pages;
        int n_free_mmu_pages;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        int nvcpus;
        struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
        int memory_config_version;
        int busy;
        unsigned long rmap_overflow;
        struct list_head vm_list;
        struct file *filp;
        struct kvm_io_bus mmio_bus;
        struct kvm_io_bus pio_bus;
};

struct descriptor_table {
        u16 limit;
        unsigned long base;
} __attribute__((packed));

struct kvm_arch_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        void (*hardware_enable)(void *dummy);      /* __init */
        void (*hardware_disable)(void *dummy);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */

        int (*vcpu_create)(struct kvm_vcpu *vcpu);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);

        void (*vcpu_load)(struct kvm_vcpu *vcpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*vcpu_decache)(struct kvm_vcpu *vcpu);

        int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                               struct kvm_debug_guest *dbg);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
        void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception);
        void (*cache_regs)(struct kvm_vcpu *vcpu);
        void (*decache_regs)(struct kvm_vcpu *vcpu);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  unsigned long addr, u32 err_code);

        void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

        int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        int (*vcpu_setup)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
};

extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);

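/*
 * Example (illustrative sketch of the registration flow): each backend module
 * fills a struct kvm_arch_ops and hands it to the core from its module init,
 * roughly
 *
 *      static struct kvm_arch_ops vmx_arch_ops = {
 *              .cpu_has_kvm_support = ...,
 *              .hardware_setup      = ...,
 *              .run                 = ...,
 *      };
 *      kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
 */
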
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

enum emulation_result {
        EMULATE_DONE,       /* no further processing */
        EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
        EMULATE_FAIL,       /* can't emulate this instruction */
};

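/*
 * Typical handling of the result (illustrative sketch):
 *
 *      switch (emulate_instruction(vcpu, run, cr2, error_code)) {
 *      case EMULATE_DONE:
 *              return 1;       -- resume the guest
 *      case EMULATE_DO_MMIO:
 *              return 0;       -- exit to userspace to satisfy the mmio
 *      case EMULATE_FAIL:
 *              ...             -- report the failed emulation
 *      }
 */
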
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long cr2, u16 error_code);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
                     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                  int size, unsigned long count, int string, int down,
                  gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void load_msrs(struct vmx_msr_entry *e, int n);
void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);

int kvm_read_guest(struct kvm_vcpu *vcpu,
                   gva_t addr,
                   unsigned long size,
                   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
                    gva_t addr,
                    unsigned long size,
                    void *data);

unsigned long segment_base(u16 selector);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *old, const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);

static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                     u32 error_code)
{
        return vcpu->mmu.page_fault(vcpu, gva, error_code);
}

static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                __kvm_mmu_free_some_pages(vcpu);
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->shadow_efer & EFER_LME;
#else
        return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PAE_MASK;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PSE_MASK;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & X86_CR0_PG;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
        u16 seg;
        asm ("mov %%fs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_gs(void)
{
        u16 seg;
        asm ("mov %%gs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_ldt(void)
{
        u16 ldt;
        asm ("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void load_fs(u16 sel)
{
        asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
        asm ("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
        asm ("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
        asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
        asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
        u16 tr;
        asm ("str %0" : "=g"(tr));
        return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline void fx_save(void *image)
{
        asm ("fxsave (%0)" : : "r" (image));
}

static inline void fx_restore(void *image)
{
        asm ("fxrstor (%0)" : : "r" (image));
}

static inline void fpu_init(void)
{
        asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

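/*
 * The VMX instructions below are spelled out as raw opcode bytes so the file
 * assembles even with toolchains that do not yet know the VMX mnemonics
 * (vmclear, vmlaunch, vmresume, vmptrld, vmread, vmwrite, vmxoff, vmxon).
 */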
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif