/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
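
/*
 * Illustrative sketch (not part of the original header): how a guest CR3
 * load might be validated against the reserved-bit masks above.  The
 * helper name and parameters are hypothetical; the real checks live in
 * the set_cr3() emulation.
 */
static inline int example_cr3_reserved_bits_set(unsigned long cr3,
						int long_mode, int pae)
{
	if (long_mode)
		return (cr3 & CR3_L_MODE_RESERVED_BITS) != 0;
	if (pae)
		return (cr3 & CR3_PAE_RESERVED_BITS) != 0;
	return (cr3 & CR3_NONPAE_RESERVED_BITS) != 0;
}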
#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
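
/*
 * Worked example for the sizing knobs above: a guest with 512 MB of
 * memory has 131072 4K pages, so KVM_PERMILLE_MMU_PAGES sizes the
 * shadow page cache at 131072 * 20 / 1000 = 2621 pages, bounded below
 * by KVM_MIN_ALLOC_MMU_PAGES.
 */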
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define KVM_PIO_PAGE_OFFSET 1
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH	0
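
/*
 * Illustrative sketch (assumes the atomic bitops pulled in by the
 * includes above): request bits are posted and consumed with atomic
 * bit operations on vcpu->requests.  The helper name is hypothetical.
 */
static inline int example_consume_tlb_flush(unsigned long *requests)
{
	return test_and_clear_bit(KVM_REQ_TLB_FLUSH, requests);
}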
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */
typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef unsigned long  gfn_t;

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef unsigned long  hfn_t;
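
/*
 * Illustrative sketch: the frame-number types relate to the address
 * types by PAGE_SHIFT.  These helper names are hypothetical.
 */
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline gpa_t example_gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}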
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};
/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;
	};
};
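
/*
 * Illustrative sketch: composing a role for a 64-bit guest's level-3
 * shadow page; the padding keeps the fields readable when the packed
 * word is printed in hex.  Values are for demonstration only.
 */
static inline unsigned example_role_word(void)
{
	union kvm_mmu_page_role role = { 0 };

	role.glevels = 4;	/* 4-level guest paging */
	role.level = 3;		/* this shadow page is a level-3 table */
	return role.word;	/* packed view, e.g. for hashing/printing */
}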
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;         /* More than one parent_pte? */
	int root_count;          /* Currently serving as active root */
	union {
		u64 *parent_pte;               /* !multimapped */
		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
	};
};
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	int shadow_root_level;
};
#define KVM_NR_MEM_OBJS 40

struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};
#include "x86_emulate.h"
struct kvm_pio_request {
	struct page *guest_pages[2];
	unsigned guest_page_offset;
};
struct kvm_stat {
	u32 irq_window_exits;
	u32 request_irq_exits;
};
struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr, int len, void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr, int len, const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};
static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr, int len, void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr, int len, const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	if (dev->destructor)
		dev->destructor(dev);
}
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
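
/*
 * Illustrative sketch: a minimal kvm_io_device (a byte sink at an
 * arbitrary example address) and its registration.  A complete device
 * must also supply ->read, since the wrappers above call it
 * unconditionally; everything named example_* is hypothetical.
 */
static inline int example_sink_in_range(struct kvm_io_device *this, gpa_t addr)
{
	return addr == 0xfee00000ULL;	/* arbitrary example address */
}

static inline void example_sink_write(struct kvm_io_device *this,
				      gpa_t addr, int len, const void *val)
{
	/* discard the data */
}

static inline void example_register_sink(struct kvm_io_bus *bus,
					 struct kvm_io_device *dev)
{
	dev->in_range = example_sink_in_range;
	dev->write = example_sink_write;
	kvm_io_bus_register_dev(bus, dev);
}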
#ifdef CONFIG_HAS_IOMEM
#define KVM_VCPU_MMIO			\
	int mmio_read_completed;	\
	unsigned char mmio_data[8];	\
	gpa_t mmio_phys_addr;
#else
#define KVM_VCPU_MMIO
#endif
#define KVM_VCPU_COMM					\
	struct preempt_notifier preempt_notifier;	\
	struct mutex mutex;				\
	struct kvm_run *run;				\
	unsigned long requests;				\
	struct kvm_guest_debug guest_debug;		\
	int guest_fpu_loaded;				\
	wait_queue_head_t wq;				\
	struct kvm_stat stat;				\
	KVM_VCPU_MMIO
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};
struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	unsigned long userspace_addr;
};
struct kvm {
	struct mutex lock; /* protects everything except vcpus */
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	unsigned long rmap_overflow;
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	int round_robin_prev_vcpu;
};
static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
{
	return kvm->vpic;
}

static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
{
	return kvm->vioapic;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return pic_irqchip(kvm) != 0;
}
struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));
struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	void (*hardware_enable)(void *dummy);      /* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);
};
extern struct kvm_x86_ops *kvm_x86_ops;
/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id, ##__VA_ARGS__);	\
 } while (0)
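
/* Typical (illustrative) use: pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr); */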
#define kvm_printf(kvm, fmt...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
		 struct module *module);
void kvm_exit_x86(void);
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
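
/*
 * Illustrative sketch: gpa_to_hpa() reports failure in-band by setting
 * the top bit of the returned hpa, which is_error_hpa() tests.  A
 * hypothetical caller would check it like this:
 *
 *	hpa_t hpa = gpa_to_hpa(kvm, gpa);
 *	if (is_error_hpa(hpa))
 *		return -EFAULT;
 */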
extern struct page *bad_page;

int is_error_page(struct page *page);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
void kvm_release_page(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
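
/*
 * Illustrative sketch: copying a guest-physical value out of the guest
 * with the accessors above; on success they return 0.  The helper name
 * is hypothetical.
 */
static inline int example_read_guest_u32(struct kvm *kvm, gpa_t gpa, u32 *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}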
enum emulation_result {
	EMULATE_DONE,       /* no further processing */
	EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
	EMULATE_FAIL,       /* can't emulate this instruction */
};
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int no_decode);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
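
/*
 * Illustrative sketch: a caller dispatching on emulate_instruction()'s
 * result, as an exit handler might.  The return convention assumed here
 * (1 = resume the guest, 0 = exit to userspace) follows kvm's usual
 * handle_exit contract; the helper itself is hypothetical.
 */
static inline int example_handle_emulation(struct kvm_vcpu *vcpu,
					   struct kvm_run *run,
					   unsigned long cr2, u16 error_code)
{
	switch (emulate_instruction(vcpu, run, cr2, error_code, 0)) {
	case EMULATE_DONE:
		return 1;	/* resume the guest */
	case EMULATE_DO_MMIO:
		return 0;	/* let userspace complete the mmio */
	case EMULATE_FAIL:
	default:
		kvm_report_emulation_failure(vcpu, "example");
		return 0;
	}
}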
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);
unsigned long segment_base(u16 selector);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

__init void kvm_arch_init(void);
static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}
static inline u16 read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

static inline void load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

static inline void get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}
#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif
static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)" : : "r"(image));
}
static inline void fpu_init(void)
{
	asm("finit");
}
static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
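
/*
 * Illustrative sketch: the byte sequences above encode VMX instructions
 * for assemblers that don't know them.  For VMCLEAR/VMPTRLD, %rax holds
 * the address of the 64-bit VMCS physical address.  The helper below is
 * hypothetical.
 */
static inline void example_vmclear(u64 vmcs_pa)
{
	asm volatile(ASM_VMX_VMCLEAR_RAX
		     : : "a"(&vmcs_pa), "m"(vmcs_pa)
		     : "cc", "memory");
}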
#define MSR_IA32_TIME_STAMP_COUNTER 0x010
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
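
/*
 * For reference: RMODE_TSS_SIZE = 0x68 + 256/8 + 65536/8 + 1
 *                               = 104 + 32 + 8192 + 1 = 8329 bytes,
 * so the real-mode TSS occupies three 4K pages.
 */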