KVM: Move kvm_vm_ioctl_create_vcpu() around
[linux-block.git] / drivers / kvm / kvm.h
#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include "vmx.h"
#include <linux/kvm.h>
#include <linux/kvm_para.h>

#define CR0_PE_MASK (1ULL << 0)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
#define CR0_NW_MASK (1ULL << 29)
#define CR0_CD_MASK (1ULL << 30)
#define CR0_PG_MASK (1ULL << 31)

#define CR3_WPT_MASK (1ULL << 3)
#define CR3_PCD_MASK (1ULL << 4)

#define CR3_RESEVED_BITS 0x07ULL
#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
#define CR3_FLAGS_MASK ((1ULL << 5) - 1)

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PSE_MASK (1ULL << 4)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PGE_MASK (1ULL << 7)
#define CR4_VMXE_MASK (1ULL << 13)

#define KVM_GUEST_CR0_MASK \
        (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
         | CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
        (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
#define KVM_GUEST_CR4_MASK \
        (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 1
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 256
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)

#define DE_VECTOR 0
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef unsigned long hfn_t;

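/*
 * Illustrative sketch, not part of the original header: a "frame number"
 * is the corresponding address with the in-page offset shifted out, so the
 * gpa/gfn (and hpa/hfn) pairs convert as below.  The helper names are
 * hypothetical.
 */
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);      /* drop the in-page offset */
}

static inline gpa_t example_gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;        /* page-aligned address */
}
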
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
        u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
        struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 * bits 4:7 - page table level for this shadow (1-4)
 * bits 8:9 - page table quadrant for 2-level guests
 * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned glevels : 4;
                unsigned level : 4;
                unsigned quadrant : 2;
                unsigned pad_for_nice_hex_output : 6;
                unsigned metaphysical : 1;
        };
};

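/*
 * Illustrative sketch, not part of the original header: because the
 * bitfields above alias 'word', a complete role can be compared (or used
 * as a hash key) in a single integer operation.  The helper name is
 * hypothetical.
 */
static inline int example_role_equal(union kvm_mmu_page_role a,
                                     union kvm_mmu_page_role b)
{
        return a.word == b.word;        /* compares every field at once */
}
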
struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        hpa_t page_hpa;
        unsigned long slot_bitmap; /* One bit set per slot which has memory
                                    * in this shadow page.
                                    */
        int global;      /* Set if all ptes in this page are global */
        int multimapped; /* More than one parent_pte? */
        int root_count;  /* Currently serving as active root */
        union {
                u64 *parent_pte;               /* !multimapped */
                struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
        };
};

struct vmcs {
        u32 revision_id;
        u32 abort;
        char data[0];
};

#define vmx_msr_entry kvm_msr_entry

struct kvm_vcpu;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*free)(struct kvm_vcpu *vcpu);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;

        u64 *pae_root;
};

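/*
 * Illustrative sketch, not part of the original header: callers invoke the
 * current mode's implementation through the function pointers and never
 * branch on the paging mode themselves, e.g.
 *
 *      gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 *      if (gpa == UNMAPPED_GVA)
 *              return ...;     // gva not mapped by the guest page tables
 */
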
#define KVM_NR_MEM_OBJS 20

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};

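/*
 * Illustrative sketch, not part of the original header: a fault handler
 * first tops the cache up so that the fault path itself can "allocate"
 * without ever failing.  The names cache/obj/objsize are hypothetical:
 *
 *      while (cache->nobjs < KVM_NR_MEM_OBJS) {
 *              obj = kzalloc(objsize, GFP_KERNEL);
 *              if (!obj)
 *                      return -ENOMEM; // fail here, not mid-fault
 *              cache->objects[cache->nobjs++] = obj;
 *      }
 *
 * and then pops objects as it walks the shadow page tables:
 *
 *      obj = cache->objects[--cache->nobjs];
 */
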
struct kvm_guest_debug {
        int enabled;
        unsigned long bp[4];
        int singlestep;
};

enum {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        NR_VCPU_REGS
};

enum {
        VCPU_SREG_CS,
        VCPU_SREG_DS,
        VCPU_SREG_ES,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_SS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

struct kvm_vcpu {
        struct kvm *kvm;
        union {
                struct vmcs *vmcs;
                struct vcpu_svm *svm;
        };
        struct mutex mutex;
        int cpu;
        int launched;
        int interrupt_window_open;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
        unsigned long irq_pending[NR_IRQ_WORDS];
        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
        unsigned long rip;                /* needs vcpu_load_rsp_rip() */

        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
        gpa_t para_state_gpa;
        struct page *para_state_page;
        gpa_t hypercall_gpa;
        unsigned long cr4;
        unsigned long cr8;
        u64 pdptrs[4]; /* pae */
        u64 shadow_efer;
        u64 apic_base;
        u64 ia32_misc_enable_msr;
        int nmsrs;
        struct vmx_msr_entry *guest_msrs;
        struct vmx_msr_entry *host_msrs;

        struct list_head free_pages;
        struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
        struct kvm_mmu mmu;

        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;

        gfn_t last_pt_write_gfn;
        int last_pt_write_count;

        struct kvm_guest_debug guest_debug;

        char fx_buf[FX_BUF_SIZE];
        char *host_fx_image;
        char *guest_fx_image;

        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;

        struct {
                int active;
                u8 save_iopl;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } tr, es, ds, fs, gs;
        } rmode;
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        struct page **phys_mem;
        unsigned long *dirty_bitmap;
};

struct kvm {
        spinlock_t lock; /* protects everything except vcpus */
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct list_head active_mmu_pages;
        int n_free_mmu_pages;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
        int memory_config_version;
        int busy;
        unsigned long rmap_overflow;
        struct list_head vm_list;
};

struct kvm_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 halt_exits;
        u32 request_irq_exits;
        u32 irq_exits;
};

struct descriptor_table {
        u16 limit;
        unsigned long base;
} __attribute__((packed));

struct kvm_arch_ops {
        int (*cpu_has_kvm_support)(void);     /* __init */
        int (*disabled_by_bios)(void);        /* __init */
        void (*hardware_enable)(void *dummy); /* __init */
        void (*hardware_disable)(void *dummy);
        int (*hardware_setup)(void);          /* __init */
        void (*hardware_unsetup)(void);       /* __exit */

        int (*vcpu_create)(struct kvm_vcpu *vcpu);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);

        struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*vcpu_decache)(struct kvm_vcpu *vcpu);

        int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                               struct kvm_debug_guest *dbg);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
                                      unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
        void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception);
        void (*cache_regs)(struct kvm_vcpu *vcpu);
        void (*decache_regs)(struct kvm_vcpu *vcpu);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  unsigned long addr, u32 err_code);

        void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

        int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        int (*vcpu_setup)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
};

extern struct kvm_stat kvm_stat;
extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);

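/*
 * Illustrative sketch, not part of the original header: a translation
 * failure is signalled in-band by setting the top bit of the hpa, so
 * results are checked before use, e.g.
 *
 *      hpa_t hpa = gpa_to_hpa(vcpu, gpa);
 *      if (is_error_hpa(hpa))
 *              return ...;     // gpa is not backed by any memory slot
 */
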
void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->phys_mem[gfn - slot->base_gfn];
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

enum emulation_result {
        EMULATE_DONE,    /* no further processing */
        EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
        EMULATE_FAIL,    /* can't emulate this instruction */
};

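/*
 * Illustrative sketch, not part of the original header: exit handlers
 * typically dispatch on the emulation result, e.g.
 *
 *      switch (emulate_instruction(vcpu, run, cr2, error_code)) {
 *      case EMULATE_DONE:
 *              return 1;       // instruction handled, resume the guest
 *      case EMULATE_DO_MMIO:
 *              return 0;       // let userspace complete the mmio access
 *      case EMULATE_FAIL:
 *              ...             // report the failure
 *      }
 */
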
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long cr2, u16 error_code);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
                     unsigned long *rflags);

struct x86_emulate_ctxt;

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void load_msrs(struct vmx_msr_entry *e, int n);
void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);

int kvm_read_guest(struct kvm_vcpu *vcpu,
                   gva_t addr,
                   unsigned long size,
                   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
                    gva_t addr,
                    unsigned long size,
                    void *data);

unsigned long segment_base(u16 selector);

void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);

static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                     u32 error_code)
{
        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                kvm_mmu_free_some_pages(vcpu);
        return vcpu->mmu.page_fault(vcpu, gva, error_code);
}

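/*
 * Illustrative sketch, not part of the original header: the arch-specific
 * page-fault exit handler hands the fault to the shadow mmu and falls back
 * to emulation when the mmu could not fix it up, e.g.
 *
 *      r = kvm_mmu_page_fault(vcpu, cr2, error_code);
 *      if (r < 0)
 *              return r;       // internal error
 *      if (!r)
 *              return 1;       // fixed by the shadow mmu, resume the guest
 *      ...                     // otherwise emulate the faulting instruction
 */
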
static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
        return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->shadow_efer & EFER_LME;
#else
        return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PAE_MASK;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PSE_MASK;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & CR0_PG_MASK;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
        u16 seg;
        asm ("mov %%fs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_gs(void)
{
        u16 seg;
        asm ("mov %%gs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_ldt(void)
{
        u16 ldt;
        asm ("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void load_fs(u16 sel)
{
        asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
        asm ("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
        asm ("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
        asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
        asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
        u16 tr;
        asm ("str %0" : "=g"(tr));
        return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline void fx_save(void *image)
{
        asm ("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(void *image)
{
        asm ("fxrstor (%0)" : : "r"(image));
}

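/*
 * Illustrative sketch, not part of the original header: fxsave/fxrstor
 * demand a 16-byte aligned 512-byte image, which is why fx_buf is sized
 * 2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN; the host and guest images can be
 * carved out of it at an aligned offset, e.g.
 *
 *      vcpu->host_fx_image = (char *)ALIGN((unsigned long)vcpu->fx_buf,
 *                                          FX_IMAGE_ALIGN);
 *      vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
 */
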
static inline void fpu_init(void)
{
        asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

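/*
 * Raw opcode encodings for the VMX instructions (vmclear, vmlaunch, ...),
 * for use with assemblers that do not know the VMX mnemonics; the register
 * variants encode the operand named in the macro.
 */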
#define ASM_VMX_VMCLEAR_RAX     ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH        ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME        ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX     ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX  ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF          ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX       ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif /* __KVM_H */