/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef KVM_X86_H
#define KVM_X86_H

#include "kvm.h"

#include <linux/types.h>
#include <linux/mm.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

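/*
 * Reserved CR3 bits per paging mode; loading CR3 with any of these set
 * lets KVM inject #GP.  Only PWT/PCD are legal low bits, and the long
 * mode mask additionally reserves everything above bit 39.
 */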
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)

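/*
 * CR0/CR4 bits the guest does not own under VMX: the *_MASK bits are
 * intercepted and shadowed, while the *_ALWAYS_ON bits are forced on
 * in the hardware register whenever the guest runs.
 */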
#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

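/* Hardware exception vector numbers (see the Intel SDM exception table). */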
#define DE_VECTOR 0
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

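/*
 * GPR indices into vcpu->regs[], in x86 instruction-encoding order so
 * the emulator can index registers straight from ModRM fields.
 */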
enum {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_CS,
	VCPU_SREG_DS,
	VCPU_SREG_ES,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_SS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include "x86_emulate.h"

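/*
 * Per-vcpu x86 state: shadowed control registers and MSRs, the local
 * APIC, shadow MMU context and caches, FPU images, and in-flight
 * PIO/MMIO emulation state.  KVM_VCPU_COMM pulls in the
 * arch-independent part.
 */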
struct kvm_vcpu {
	KVM_VCPU_COMM;
	u64 host_tsc;
	int interrupt_window_open;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip; /* needs vcpu_load_rsp_rip() */

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic; /* kernel irqchip context */
#define VCPU_MP_STATE_RUNNABLE 0
#define VCPU_MP_STATE_UNINITIALIZED 1
#define VCPU_MP_STATE_INIT_RECEIVED 2
#define VCPU_MP_STATE_SIPI_RECEIVED 3
#define VCPU_MP_STATE_HALTED 4
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;

	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;

	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
};

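/*
 * Vendor hooks: the vmx and svm modules each provide one of these and
 * register it at module init, and the arch-independent code reaches the
 * hardware only through this table.
 */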
struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void); /* __init */
	int (*disabled_by_bios)(void); /* __init */
	void (*hardware_enable)(void *dummy); /* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void); /* __init */
	void (*hardware_unsetup)(void); /* __exit */

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);

	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
};

extern struct kvm_x86_ops *kvm_x86_ops;

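/* Shadow MMU set-up and teardown, module-wide and per-vcpu. */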
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

enum emulation_result {
	EMULATE_DONE,    /* no further processing */
	EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
	EMULATE_FAIL,    /* can't emulate this instruction */
};

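/*
 * Run one guest instruction through the emulator and return an
 * emulation_result; no_decode appears to reuse the previously decoded
 * instruction (e.g. when completing an MMIO access).
 */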
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int no_decode);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

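/*
 * Port I/O emulation.  In-flight state is carried in vcpu->pio and the
 * kvm_run PIO area so userspace can complete the access and re-enter.
 */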
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

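/*
 * Control-register writes with architectural checking; invalid values
 * inject #GP into the guest rather than being written.
 */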
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

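/*
 * Memory-access callbacks handed to the x86 emulator; they route reads
 * and writes to guest RAM or to MMIO as appropriate.
 */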
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);

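/*
 * Fast-path inlines used around guest entry: trim the shadow page pool
 * when it runs low and make sure a shadow root is loaded.  A minimal
 * usage sketch (the error handling is illustrative, not from this
 * header):
 *
 *	kvm_mmu_free_some_pages(vcpu);
 *	r = kvm_mmu_reload(vcpu);
 *	if (r)
 *		goto out;
 */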
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

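/* Guest CPU-mode predicates, derived from the shadowed control registers. */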
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->shadow_efer & EFER_LME;
#else
	return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & X86_CR4_PAE;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & X86_CR4_PSE;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_PG;
}

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);

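/*
 * Recover the struct kvm_mmu_page for a shadow page table from its host
 * physical address; the MMU stashes the pointer in page->private.
 */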
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

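/*
 * Raw segment, descriptor-table and FPU accessors used on the host side
 * of the world switch.
 */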
static inline u16 read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)":: "r" (image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)":: "r" (image));
}

static inline void fpu_init(void)
{
	asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

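/*
 * VMX instructions spelled as raw opcode bytes, since assemblers of the
 * time had no VMX mnemonics.
 */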
#define ASM_VMX_VMCLEAR_RAX	".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH	".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME	".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX	".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX	".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX	".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX	".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF		".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX	".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

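/*
 * Layout of the TSS built for real-mode emulation: the 0x68-byte base,
 * a 32-byte interrupt redirection bitmap, the 8 KiB I/O permission
 * bitmap, and one trailing terminator byte required after the IOPB.
 */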
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif